From c1f6b8877c41b2fcaf7bf7ae03ccf512e2be5002 Mon Sep 17 00:00:00 2001 From: keruch Date: Mon, 1 Jul 2024 21:01:57 +0200 Subject: [PATCH] feat(da): SubmitBatchV2 method --- da/da.go | 29 ++ da/interchain/chain_client.go | 20 ++ da/interchain/config.go | 30 ++- da/interchain/interchain.go | 124 ++------- da/interchain/ioutils/gzip.go | 62 +++++ da/interchain/ioutils/gzip_test.go | 61 +++++ da/interchain/retrieve_batches.go | 11 + da/interchain/submit_batch.go | 150 +++++++++++ go.mod | 11 +- go.sum | 23 +- proto/types/dymint/dymint.proto | 1 - proto/types/interchain_da/da.proto | 15 ++ types/pb/interchain_da/da.go | 13 - types/pb/interchain_da/da.pb.go | 409 +++++++++++++++++++++++++++-- types/pb/interchain_da/keys.go | 51 ++++ 15 files changed, 859 insertions(+), 151 deletions(-) create mode 100644 da/interchain/ioutils/gzip.go create mode 100644 da/interchain/ioutils/gzip_test.go create mode 100644 da/interchain/retrieve_batches.go create mode 100644 da/interchain/submit_batch.go delete mode 100644 types/pb/interchain_da/da.go create mode 100644 types/pb/interchain_da/keys.go diff --git a/da/da.go b/da/da.go index 7e5545c42..940164d6b 100644 --- a/da/da.go +++ b/da/da.go @@ -8,6 +8,7 @@ import ( "github.com/celestiaorg/celestia-openrpc/types/blob" "github.com/cometbft/cometbft/crypto/merkle" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/tendermint/tendermint/libs/pubsub" "github.com/dymensionxyz/dymint/store" @@ -179,6 +180,13 @@ type ResultSubmitBatch struct { SubmitMetaData *DASubmitMetaData } +// ResultSubmitBatchV2 contains information returned from DA layer after block submission. +type ResultSubmitBatchV2 struct { + BaseResult + // DAPath instructs how to retrieve the submitted batch from the DA layer. + DAPath Path +} + // ResultCheckBatch contains information about block availability, returned from DA layer client. 
type ResultCheckBatch struct { BaseResult @@ -196,6 +204,22 @@ type ResultRetrieveBatch struct { CheckMetaData *DACheckMetaData } +// ResultRetrieveBatchV2 contains a batch of blocks returned from the DA layer client. +type ResultRetrieveBatchV2 struct { + BaseResult + // Batches is the full block retrieved from the DA layer. + // If BaseResult.Code is not StatusSuccess, this field is nil. + Batches []*types.Batch +} + +// Path TODO: move to the Dymension proto file +type Path struct { + // DAType identifies the DA type being used by the sequencer to post the blob. + DaType string + // Commitment is a generic commitment interpreted by the DA Layer. + Commitment *cdctypes.Any +} + // DataAvailabilityLayerClient defines generic interface for DA layer block submission. // It also contains life-cycle methods. type DataAvailabilityLayerClient interface { @@ -213,6 +237,9 @@ type DataAvailabilityLayerClient interface { // triggers a state transition in the DA layer. SubmitBatch(batch *types.Batch) ResultSubmitBatch + // SubmitBatchV2 is a method that supports MsgUpdateStateV2. + SubmitBatchV2(*types.Batch) ResultSubmitBatchV2 + GetClientType() Client // CheckBatchAvailability checks the availability of the blob submitted getting proofs and validating them @@ -226,6 +253,8 @@ type DataAvailabilityLayerClient interface { type BatchRetriever interface { // RetrieveBatches returns blocks at given data layer height from data availability layer. RetrieveBatches(daMetaData *DASubmitMetaData) ResultRetrieveBatch + // RetrieveBatchesV2 is a method that supports MsgUpdateStateV2. 
+ RetrieveBatchesV2(ResultSubmitBatchV2) ResultRetrieveBatchV2 // CheckBatchAvailability checks the availability of the blob received getting proofs and validating them CheckBatchAvailability(daMetaData *DASubmitMetaData) ResultCheckBatch } diff --git a/da/interchain/chain_client.go b/da/interchain/chain_client.go index a2814342c..3fef37c66 100644 --- a/da/interchain/chain_client.go +++ b/da/interchain/chain_client.go @@ -7,6 +7,9 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" "github.com/dymensionxyz/cosmosclient/cosmosclient" "github.com/ignite/cli/ignite/pkg/cosmosaccount" + "github.com/tendermint/tendermint/libs/bytes" + rpcclient "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" interchainda "github.com/dymensionxyz/dymint/types/pb/interchain_da" ) @@ -43,3 +46,20 @@ func (c *daClient) Params(ctx context.Context) (interchainda.Params, error) { } return resp.GetParams(), nil } + +func (c *daClient) Tx(ctx context.Context, txHash []byte) (*ctypes.ResultTx, error) { + return c.RPC.Tx(ctx, txHash, false) +} + +func (c *daClient) ABCIQueryWithProof( + ctx context.Context, + path string, + data bytes.HexBytes, + height int64, +) (*ctypes.ResultABCIQuery, error) { + opts := rpcclient.ABCIQueryOptions{ + Height: height, + Prove: true, + } + return c.RPC.ABCIQueryWithOptions(ctx, path, data, opts) +} diff --git a/da/interchain/config.go b/da/interchain/config.go index cf8f982c6..adb12b592 100644 --- a/da/interchain/config.go +++ b/da/interchain/config.go @@ -1,12 +1,15 @@ package interchain import ( + "time" + interchainda "github.com/dymensionxyz/dymint/types/pb/interchain_da" ) type DAConfig struct { - ClientID string `mapstructure:"client_id"` // This is the IBC client ID on Dymension hub for the DA chain - ChainID string `mapstructure:"chain_id"` // The chain ID of the DA chain + ClientID string `mapstructure:"client_id"` // IBC client ID between the Hub and DA layer + ChainID string 
`mapstructure:"chain_id"` // Chain ID of the DA layer + KeyringBackend string `mapstructure:"keyring_backend"` KeyringHomeDir string `mapstructure:"keyring_home_dir"` AddressPrefix string `mapstructure:"address_prefix"` @@ -16,4 +19,27 @@ type DAConfig struct { GasPrices string `mapstructure:"gas_prices"` GasFees string `mapstructure:"gas_fees"` DAParams interchainda.Params `mapstructure:"da_params"` + + RetryMinDelay time.Duration `mapstructure:"retry_min_delay"` + RetryMaxDelay time.Duration `mapstructure:"retry_max_delay"` + RetryAttempts uint `mapstructure:"retry_attempts"` +} + +func DefaultDAConfig() DAConfig { + return DAConfig{ + ClientID: "", + ChainID: "", + KeyringBackend: "", + KeyringHomeDir: "", + AddressPrefix: "", + AccountName: "", + NodeAddress: "", + GasLimit: 0, + GasPrices: "", + GasFees: "", + DAParams: interchainda.Params{}, + RetryMinDelay: 100 * time.Millisecond, + RetryMaxDelay: 2 * time.Second, + RetryAttempts: 10, + } } diff --git a/da/interchain/interchain.go b/da/interchain/interchain.go index b56e3b814..df507b4a4 100644 --- a/da/interchain/interchain.go +++ b/da/interchain/interchain.go @@ -11,28 +11,31 @@ import ( cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/dymensionxyz/cosmosclient/cosmosclient" + "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/pubsub" + ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/dymensionxyz/dymint/da" - "github.com/dymensionxyz/dymint/settlement/dymension" "github.com/dymensionxyz/dymint/store" "github.com/dymensionxyz/dymint/types" interchainda "github.com/dymensionxyz/dymint/types/pb/interchain_da" ) var ( - _ da.DataAvailabilityLayerClient = &DataAvailabilityLayerClient{} - _ da.BatchRetriever = &DataAvailabilityLayerClient{} + _ da.DataAvailabilityLayerClient = &DALayerClient{} + _ da.BatchRetriever = &DALayerClient{} ) type DAClient interface { Context() sdkclient.Context 
BroadcastTx(accountName string, msgs ...sdk.Msg) (cosmosclient.Response, error) Params(ctx context.Context) (interchainda.Params, error) + Tx(ctx context.Context, txHash []byte) (*ctypes.ResultTx, error) + ABCIQueryWithProof(ctx context.Context, path string, data bytes.HexBytes, height int64) (*ctypes.ResultABCIQuery, error) } -// DataAvailabilityLayerClient is a client for DA-provider blockchains supporting the interchain-da module. -type DataAvailabilityLayerClient struct { +// DALayerClient is a client for DA-provider blockchains supporting the interchain-da module. +type DALayerClient struct { logger types.Logger ctx context.Context cancel context.CancelFunc @@ -46,7 +49,7 @@ type DataAvailabilityLayerClient struct { } // Init is called once. It reads the DA client configuration and initializes resources for the interchain DA provider. -func (c *DataAvailabilityLayerClient) Init(rawConfig []byte, server *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { +func (c *DALayerClient) Init(rawConfig []byte, _ *pubsub.Server, _ store.KV, logger types.Logger, options ...da.Option) error { ctx := context.Background() // Read DA layer config @@ -84,7 +87,6 @@ func (c *DataAvailabilityLayerClient) Init(rawConfig []byte, server *pubsub.Serv c.cancel = cancel c.cdc = cdc c.synced = make(chan struct{}) - c.pubsubServer = server c.daClient = client c.daConfig = config @@ -96,120 +98,30 @@ func (c *DataAvailabilityLayerClient) Init(rawConfig []byte, server *pubsub.Serv return nil } -// Start is called once, after Init. It starts the operation of DataAvailabilityLayerClient, and Dymint will start submitting batches to the provider. +// Start is called once, after Init. It starts the operation of DALayerClient, and Dymint will start submitting batches to the provider. // It fetches the latest interchain module parameters and sets up a subscription to receive updates when the provider updates these parameters. 
// This ensures that the client is always up-to-date. -func (c *DataAvailabilityLayerClient) Start() error { - // Get the connectionID from the dymension hub for the da chain - c.daConfig.ClientID = dymension.(c.chainConfig.ChainID) - - // Setup a subscription to event EventUpdateParams - c.grpc.Subscribe(func() { - // This event is thrown at the end of the block when the module params are updated - if block.event == EventUpdateParams { - // when the chain params are updated, update the client config to reflect the same - da.chainConfig.chainParams = block.event.new_params - } - }) +func (c *DALayerClient) Start() error { + // TODO: Setup a subscription to event EventUpdateParams + return nil } -// Stop is called once, when DataAvailabilityLayerClient is no longer needed. -func (c *DataAvailabilityLayerClient) Stop() error { +// Stop is called once, when DALayerClient is no longer needed. +func (c *DALayerClient) Stop() error { c.pubsubServer.Stop() c.cancel() return nil } // Synced returns channel for on sync event -func (c *DataAvailabilityLayerClient) Synced() <-chan struct{} { +func (c *DALayerClient) Synced() <-chan struct{} { return c.synced } -func (c *DataAvailabilityLayerClient) GetClientType() da.Client { +func (c *DALayerClient) GetClientType() da.Client { return da.Interchain } -func (c *DataAvailabilityLayerClient) SubmitBatch(batch *types.Batch) da.ResultSubmitBatch { - result, err := c.submitBatch(batch) - if err != nil { - return da.ResultSubmitBatch{ - BaseResult: da.BaseResult{ - Code: da.StatusError, - Message: err.Error(), - Error: err, - }, - SubmitMetaData: nil, - } - } - return da.ResultSubmitBatch{ - BaseResult: da.BaseResult{ - Code: da.StatusSuccess, - Message: "Submission successful", - }, - SubmitMetaData: &da.DASubmitMetaData{ - Height: height, - Namespace: c.config.NamespaceID.Bytes(), - Client: da.Celestia, - Commitment: commitment, - Index: 0, - Length: 0, - Root: nil, - }, - } -} - -type submitBatchResult struct { - BlobID uint64 - 
BlobHash string -} - -func (c *DataAvailabilityLayerClient) submitBatch(batch *types.Batch) (submitBatchResult, error) { - blob, err := batch.MarshalBinary() - if err != nil { - return submitBatchResult{}, fmt.Errorf("can't marshal batch: %w", err) - } - - if len(blob) > int(c.daConfig.DAParams.MaxBlobSize) { - return submitBatchResult{}, fmt.Errorf("blob size %d exceeds the maximum allowed size %d", len(blob), c.daConfig.DAParams.MaxBlobSize) - } - - feesToPay := sdk.NewCoin(c.daConfig.DAParams.CostPerByte.Denom, c.daConfig.DAParams.CostPerByte.Amount.MulRaw(int64(len(blob)))) - - msg := interchainda.MsgSubmitBlob{ - Creator: c.daConfig.AccountName, - Blob: blob, - Fees: feesToPay, - } - - txResp, err := c.daClient.BroadcastTx(c.daConfig.AccountName, &msg) - if err != nil { - return submitBatchResult{}, fmt.Errorf("can't broadcast MsgSubmitBlob to the DA layer: %w", err) - } - if txResp.Code != 0 { - return submitBatchResult{}, fmt.Errorf("MsgSubmitBlob broadcast tx status code is not 0: code %d", txResp.Code) - } - - var resp interchainda.MsgSubmitBlobResponse - err = txResp.Decode(&resp) - if err != nil { - return submitBatchResult{}, fmt.Errorf("can't decode MsgSubmitBlob response: %w", err) - } - - // trigger ibc stateupdate - optional (?) - // other ibc interactions would trigger this anyway. But until then, inclusion cannot be verified. 
- // better to trigger a stateupdate now imo - dymension.tx.ibc.client.updatestate(c.daConfig.clientID) // could import the go relayer and execute their funcs - - return submitBatchResult{ - BlobID: resp.BlobId, - BlobHash: resp.BlobHash, - }, nil -} - -func (c *DataAvailabilityLayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { - panic("implement me") -} - -func (c *DataAvailabilityLayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { +func (c *DALayerClient) CheckBatchAvailability(daMetaData *da.DASubmitMetaData) da.ResultCheckBatch { panic("implement me") } diff --git a/da/interchain/ioutils/gzip.go b/da/interchain/ioutils/gzip.go new file mode 100644 index 000000000..713ec799e --- /dev/null +++ b/da/interchain/ioutils/gzip.go @@ -0,0 +1,62 @@ +package ioutils + +import ( + "bytes" + "compress/gzip" + "io" +) + +// Note: []byte can never be const as they are inherently mutable +var ( + // magic bytes to identify gzip. 
+ // See https://www.ietf.org/rfc/rfc1952.txt + // and https://github.com/golang/go/blob/master/src/net/http/sniff.go#L186 + gzipIdent = []byte("\x1F\x8B\x08") +) + +// IsGzip returns checks if the file contents are gzip compressed +func IsGzip(input []byte) bool { + return len(input) >= 3 && bytes.Equal(gzipIdent, input[0:3]) +} + +// Gzip compresses the input ([]byte) +func Gzip(input []byte) ([]byte, error) { + // Create gzip writer + var b bytes.Buffer + w := gzip.NewWriter(&b) + + _, err := w.Write(input) + if err != nil { + return nil, err + } + + // You must close this first to flush the bytes to the buffer + err = w.Close() + if err != nil { + return nil, err + } + + return b.Bytes(), nil +} + +// Gunzip decompresses the input ([]byte) +func Gunzip(input []byte) ([]byte, error) { + // Create gzip reader + b := bytes.NewReader(input) + r, err := gzip.NewReader(b) + if err != nil { + return nil, err + } + + output, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + err = r.Close() + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/da/interchain/ioutils/gzip_test.go b/da/interchain/ioutils/gzip_test.go new file mode 100644 index 000000000..8c9c916d8 --- /dev/null +++ b/da/interchain/ioutils/gzip_test.go @@ -0,0 +1,61 @@ +package ioutils_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/dymensionxyz/dymint/da/interchain/ioutils" +) + +func FuzzGzipGunzip(f *testing.F) { + f.Add([]byte("YW5vdGhlciBlbmNvZGUgc3RyaW5n")) // base64 string + f.Add([]byte("Different String!")) // plain string + f.Add([]byte("1234567890")) // numbers + f.Add([]byte{}) // empty slice + + f.Fuzz(func(t *testing.T, data []byte) { + // Encode to gzip + encoded, err := ioutils.Gzip(data) + require.NoError(t, err) + + // Verify if it's a gzip + ok := ioutils.IsGzip(encoded) + require.True(t, ok) + + // Decode from gzip + decoded, err := ioutils.Gunzip(encoded) + require.NoError(t, err) + + // Check if the resulted 
output is not a gzip + ok = ioutils.IsGzip(decoded) + require.False(t, ok) + + // Compare the original data against the output + require.Equal(t, data, decoded) + }) +} + +func TestGzipGunzip(t *testing.T) { + // Prepare the input + var expected = []byte("Hello world!") + + // Encode to gzip + encoded, err := ioutils.Gzip(expected) + require.NoError(t, err) + + // Check the output is correct + ok := ioutils.IsGzip(encoded) + require.True(t, ok) + + // Decode from gzip + decoded, err := ioutils.Gunzip(encoded) + require.NoError(t, err) + + // The output is not gzip anymore + ok = ioutils.IsGzip(decoded) + require.False(t, ok) + + // Compare the input against the output + require.Equal(t, expected, decoded) +} diff --git a/da/interchain/retrieve_batches.go b/da/interchain/retrieve_batches.go new file mode 100644 index 000000000..e965b7fd4 --- /dev/null +++ b/da/interchain/retrieve_batches.go @@ -0,0 +1,11 @@ +package interchain + +import "github.com/dymensionxyz/dymint/da" + +func (c *DALayerClient) RetrieveBatches(daMetaData *da.DASubmitMetaData) da.ResultRetrieveBatch { + panic("implement me") +} + +func (c *DALayerClient) RetrieveBatchesV2(da.ResultSubmitBatchV2) da.ResultRetrieveBatchV2 { + panic("implement me") +} diff --git a/da/interchain/submit_batch.go b/da/interchain/submit_batch.go new file mode 100644 index 000000000..0b97bc035 --- /dev/null +++ b/da/interchain/submit_batch.go @@ -0,0 +1,150 @@ +package interchain + +import ( + "fmt" + + "cosmossdk.io/collections" + collcodec "cosmossdk.io/collections/codec" + "github.com/avast/retry-go/v4" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/dymensionxyz/cosmosclient/cosmosclient" + + "github.com/dymensionxyz/dymint/da" + "github.com/dymensionxyz/dymint/da/interchain/ioutils" + "github.com/dymensionxyz/dymint/types" + interchainda "github.com/dymensionxyz/dymint/types/pb/interchain_da" +) + +func (c *DALayerClient) SubmitBatch(*types.Batch) 
da.ResultSubmitBatch { + panic("SubmitBatch method is not supported by the interchain DA client") +} + +func (c *DALayerClient) SubmitBatchV2(batch *types.Batch) da.ResultSubmitBatchV2 { + commitment, err := c.submitBatch(batch) + if err != nil { + return da.ResultSubmitBatchV2{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("can't submit batch to the interchain DA layer: %s", err.Error()), + Error: err, + }, + DAPath: da.Path{}, // empty in the error resp + } + } + + rawCommitment, err := cdctypes.NewAnyWithValue(commitment) + if err != nil { + return da.ResultSubmitBatchV2{ + BaseResult: da.BaseResult{ + Code: da.StatusError, + Message: fmt.Sprintf("can't submit batch to the interchain DA layer: %s", err.Error()), + Error: err, + }, + DAPath: da.Path{}, // empty in the error resp + } + } + + // TODO: add MsgUpdateClient for DA<->Hub IBC client. + + return da.ResultSubmitBatchV2{ + BaseResult: da.BaseResult{ + Code: da.StatusSuccess, + Message: "Submission successful", + }, + DAPath: da.Path{ + DaType: string(c.GetClientType()), + Commitment: rawCommitment, + }, + } +} + +type submitBatchResult struct { + BlobID uint64 + BlobHash string +} + +func (c *DALayerClient) submitBatch(batch *types.Batch) (*interchainda.Commitment, error) { + blob, err := batch.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("can't marshal batch: %w", err) + } + + gzipped, err := ioutils.Gzip(blob) + if err != nil { + return nil, fmt.Errorf("can't gzip batch: %w", err) + } + + if len(blob) > int(c.daConfig.DAParams.MaxBlobSize) { + return nil, fmt.Errorf("blob size %d exceeds the maximum allowed size %d", len(blob), c.daConfig.DAParams.MaxBlobSize) + } + + feesToPay := sdk.NewCoin(c.daConfig.DAParams.CostPerByte.Denom, c.daConfig.DAParams.CostPerByte.Amount.MulRaw(int64(len(blob)))) + + msg := interchainda.MsgSubmitBlob{ + Creator: c.daConfig.AccountName, + Blob: gzipped, + Fees: feesToPay, + } + + var txResp cosmosclient.Response + err = 
c.runWithRetry(func() error { + txResp, err = c.broadcastTx(&msg) + return err + }) + if err != nil { + return nil, fmt.Errorf("can't broadcast MsgSubmitBlob to DA layer: %w", err) + } + + var resp interchainda.MsgSubmitBlobResponse + err = txResp.Decode(&resp) + if err != nil { + return nil, fmt.Errorf("can't decode MsgSubmitBlob response: %w", err) + } + + key, err := collections.EncodeKeyWithPrefix( + interchainda.BlobMetadataPrefix(), + collcodec.NewUint64Key[interchainda.BlobID](), + interchainda.BlobID(resp.BlobId), + ) + if err != nil { + return nil, fmt.Errorf("can't encode DA layer store key: %w", err) + } + const keyPath = "/key" + abciResp, err := c.daClient.ABCIQueryWithProof(c.ctx, keyPath, key, txResp.Height) + if err != nil { + return nil, fmt.Errorf("can't call ABCI query with proof for the BlobID %d: %w", resp.BlobId, err) + } + + return &interchainda.Commitment{ + ClientId: c.daConfig.ClientID, + BlobHeight: uint64(txResp.Height), + BlobHash: resp.BlobHash, + BlobId: resp.BlobId, + MerkleProof: abciResp.Response.ProofOps, + }, nil +} + +func (c *DALayerClient) broadcastTx(msgs ...sdk.Msg) (cosmosclient.Response, error) { + txResp, err := c.daClient.BroadcastTx(c.daConfig.AccountName, msgs...) + if err != nil { + return cosmosclient.Response{}, fmt.Errorf("can't broadcast MsgSubmitBlob to the DA layer: %w", err) + } + if txResp.Code != 0 { + return cosmosclient.Response{}, fmt.Errorf("MsgSubmitBlob broadcast tx status code is not 0: code %d", txResp.Code) + } + return txResp, nil +} + +// runWithRetry runs the given operation with retry, doing a number of attempts, and taking the last error only. 
+func (c *DALayerClient) runWithRetry(operation func() error) error { + return retry.Do( + operation, + retry.Context(c.ctx), + retry.LastErrorOnly(true), + retry.Delay(c.daConfig.RetryMinDelay), + retry.Attempts(c.daConfig.RetryAttempts), + retry.MaxDelay(c.daConfig.RetryMaxDelay), + retry.DelayType(retry.BackOffDelay), + ) +} diff --git a/go.mod b/go.mod index 798fefc67..5c2a8207c 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.22.4 require ( code.cloudfoundry.org/go-diodes v0.0.0-20220725190411-383eb6634c40 + cosmossdk.io/collections v0.4.0 cosmossdk.io/errors v1.0.1 github.com/avast/retry-go/v4 v4.5.0 github.com/celestiaorg/celestia-openrpc v0.4.0-rc.1 @@ -62,7 +63,7 @@ require ( github.com/celestiaorg/rsmt2d v0.11.0 // indirect github.com/cometbft/cometbft v0.37.2 github.com/cometbft/cometbft-db v0.11.0 // indirect - github.com/cosmos/cosmos-proto v1.0.0-beta.3 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.3 github.com/cosmos/gogoproto v1.4.11 // indirect github.com/creachadair/taskgroup v0.3.2 // indirect github.com/deckarep/golang-set v1.8.0 // indirect @@ -243,8 +244,11 @@ require ( ) require ( + cosmossdk.io/api v0.7.0 // indirect + cosmossdk.io/core v0.10.0 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect cosmossdk.io/math v1.3.0 // indirect - github.com/DataDog/zstd v1.5.2 // indirect + github.com/DataDog/zstd v1.5.5 // indirect github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/btcsuite/btcd/btcutil v1.1.3 // indirect @@ -254,11 +258,12 @@ require ( github.com/cockroachdb/pebble v1.1.0 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect + github.com/cosmos/cosmos-db v1.0.0 // indirect github.com/cosmos/ibc-go/v6 v6.2.1 // indirect github.com/danwt/gerr v0.1.5 // indirect github.com/decred/dcrd/dcrec/edwards v1.0.0 // indirect github.com/evmos/evmos/v12 
v12.1.6 // indirect - github.com/getsentry/sentry-go v0.18.0 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect github.com/holiman/uint256 v1.2.2 // indirect github.com/ipfs/boxo v0.10.0 // indirect diff --git a/go.sum b/go.sum index 7f6079248..317732f9e 100644 --- a/go.sum +++ b/go.sum @@ -49,6 +49,14 @@ cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/o cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= code.cloudfoundry.org/go-diodes v0.0.0-20220725190411-383eb6634c40 h1:wzkYwwcf4uMGcDpn48WAbq8GtoqDny49tdQ4zJVAsmo= code.cloudfoundry.org/go-diodes v0.0.0-20220725190411-383eb6634c40/go.mod h1:Nx9ASXN4nIlRDEXv+qXE3dpuhnTnO28Lxl/bMUd6BMc= +cosmossdk.io/api v0.7.0 h1:QsEMIWuv9xWDbF2HZnW4Lpu1/SejCztPu0LQx7t6MN4= +cosmossdk.io/api v0.7.0/go.mod h1:kJFAEMLN57y0viszHDPLMmieF0471o5QAwwApa+270M= +cosmossdk.io/collections v0.4.0 h1:PFmwj2W8szgpD5nOd8GWH6AbYNi1f2J6akWXJ7P5t9s= +cosmossdk.io/collections v0.4.0/go.mod h1:oa5lUING2dP+gdDquow+QjlF45eL1t4TJDypgGd+tv0= +cosmossdk.io/core v0.10.0 h1:NP28Ol9YyRODmZLJg2ko/mUl40hMegeMzhJnG+XPkcY= +cosmossdk.io/core v0.10.0/go.mod h1:MygXNld9DvMgYY4yE76DM/mdZpgfeyRjy6FPjEEehlY= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= +cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= @@ -72,8 +80,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= 
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= -github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= +github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -225,6 +233,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-db v1.0.0 h1:EVcQZ+qYag7W6uorBKFPvX6gRjw6Uq2hIh4hCWjuQ0E= +github.com/cosmos/cosmos-db v1.0.0/go.mod h1:iBvi1TtqaedwLdcrZVYRSSCb6eSy61NLj4UNmdIgs0U= github.com/cosmos/cosmos-proto v1.0.0-beta.3 h1:VitvZ1lPORTVxkmF2fAp3IiA61xVwArQYKXTdEcpW6o= github.com/cosmos/cosmos-proto v1.0.0-beta.3/go.mod h1:t8IASdLaAq+bbHbjq4p960BvcTqtwuAxid3b/2rOD6I= github.com/cosmos/cosmos-sdk v0.46.16 h1:RVGv1+RulLZeNyfCaPZrZtv0kY7ZZNAI6JGpub0Uh6o= @@ -353,8 +363,8 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= 
-github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= -github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -1514,6 +1524,9 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= +gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1527,6 +1540,8 @@ lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod 
h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/proto/types/dymint/dymint.proto b/proto/types/dymint/dymint.proto index 625f63d95..8512fffbd 100755 --- a/proto/types/dymint/dymint.proto +++ b/proto/types/dymint/dymint.proto @@ -84,5 +84,4 @@ message Batch { uint64 end_height = 2; repeated Block blocks = 3; repeated Commit commits = 4; - } diff --git a/proto/types/interchain_da/da.proto b/proto/types/interchain_da/da.proto index a41f5b35b..706848fa4 100644 --- a/proto/types/interchain_da/da.proto +++ b/proto/types/interchain_da/da.proto @@ -6,6 +6,7 @@ package dymension.interchain_da; import "gogoproto/gogo.proto"; import "types/cosmos/base/v1beta1/coin.proto"; +import "types/tendermint/crypto/proof.proto"; option go_package = "github.com/dymensionxyz/dymint/types/pb/interchain_da"; @@ -25,3 +26,17 @@ message BlobMetadata { // BlobHash is the hash of the submitted blob. string blob_hash = 1; } + +// Commitment defines the commitment type used by the InterchainDALayer. +message Commitment { + // ClientID identifies the client_id of the DA chain where the blob was posted. + string client_id = 1; + // BlobHeight identifies the height at which the blob was posted. + uint64 blob_height = 2; + // BlobHash is the hash of the submitted blob. + string blob_hash = 3; + // BlobID is the unique ID of the blob. + uint64 blob_id = 4; + // MerkleProof is a merkle inclusion proof of the blob. 
+ tendermint.crypto.ProofOps merkle_proof = 5; +} diff --git a/types/pb/interchain_da/da.go b/types/pb/interchain_da/da.go deleted file mode 100644 index c1bdb3dfc..000000000 --- a/types/pb/interchain_da/da.go +++ /dev/null @@ -1,13 +0,0 @@ -package interchain_da - -import sdk "github.com/cosmos/cosmos-sdk/types" - -func (m MsgSubmitBlob) ValidateBasic() error { - // Validation is done on the DA layer side - return nil -} - -func (m MsgSubmitBlob) GetSigners() []sdk.AccAddress { - signer, _ := sdk.AccAddressFromBech32(m.Creator) - return []sdk.AccAddress{signer} -} diff --git a/types/pb/interchain_da/da.pb.go b/types/pb/interchain_da/da.pb.go index 0f7829bca..bf9e95b88 100644 --- a/types/pb/interchain_da/da.pb.go +++ b/types/pb/interchain_da/da.pb.go @@ -8,6 +8,7 @@ import ( types "github.com/cosmos/cosmos-sdk/types" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" io "io" math "math" math_bits "math/bits" @@ -135,36 +136,126 @@ func (m *BlobMetadata) GetBlobHash() string { return "" } +// Commitment defines the commitment type used by the InterchainDALayer. +type Commitment struct { + // ClientID identifies the client_id of the DA chain where the blob was posted. + ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // BlobHeight identifies the height at which the blob was posted. + BlobHeight uint64 `protobuf:"varint,2,opt,name=blob_height,json=blobHeight,proto3" json:"blob_height,omitempty"` + // BlobHash is the hash of the submitted blob. + BlobHash string `protobuf:"bytes,3,opt,name=blob_hash,json=blobHash,proto3" json:"blob_hash,omitempty"` + // BlobID is the unique ID of the blob. + BlobId uint64 `protobuf:"varint,4,opt,name=blob_id,json=blobId,proto3" json:"blob_id,omitempty"` + // MerkleProof is a merkle inclusion proof of the blob. 
+ MerkleProof *crypto.ProofOps `protobuf:"bytes,5,opt,name=merkle_proof,json=merkleProof,proto3" json:"merkle_proof,omitempty"` +} + +func (m *Commitment) Reset() { *m = Commitment{} } +func (m *Commitment) String() string { return proto.CompactTextString(m) } +func (*Commitment) ProtoMessage() {} +func (*Commitment) Descriptor() ([]byte, []int) { + return fileDescriptor_c9a26af1837c1a56, []int{2} +} +func (m *Commitment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Commitment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Commitment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Commitment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Commitment.Merge(m, src) +} +func (m *Commitment) XXX_Size() int { + return m.Size() +} +func (m *Commitment) XXX_DiscardUnknown() { + xxx_messageInfo_Commitment.DiscardUnknown(m) +} + +var xxx_messageInfo_Commitment proto.InternalMessageInfo + +func (m *Commitment) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *Commitment) GetBlobHeight() uint64 { + if m != nil { + return m.BlobHeight + } + return 0 +} + +func (m *Commitment) GetBlobHash() string { + if m != nil { + return m.BlobHash + } + return "" +} + +func (m *Commitment) GetBlobId() uint64 { + if m != nil { + return m.BlobId + } + return 0 +} + +func (m *Commitment) GetMerkleProof() *crypto.ProofOps { + if m != nil { + return m.MerkleProof + } + return nil +} + func init() { proto.RegisterType((*Params)(nil), "dymension.interchain_da.Params") proto.RegisterType((*BlobMetadata)(nil), "dymension.interchain_da.BlobMetadata") + proto.RegisterType((*Commitment)(nil), "dymension.interchain_da.Commitment") } func init() { proto.RegisterFile("types/interchain_da/da.proto", fileDescriptor_c9a26af1837c1a56) } var 
fileDescriptor_c9a26af1837c1a56 = []byte{ - // 325 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xc1, 0x4a, 0xc3, 0x30, - 0x1c, 0xc6, 0x1b, 0x1d, 0xc3, 0x75, 0xd6, 0x43, 0x11, 0x9c, 0x53, 0xea, 0x18, 0x0a, 0x03, 0x21, - 0x61, 0x8a, 0x2f, 0xd0, 0x5d, 0xbc, 0x88, 0xa3, 0xde, 0xbc, 0x94, 0x7f, 0xda, 0xb0, 0x06, 0xd6, - 0xfc, 0x4b, 0x93, 0xc9, 0xba, 0xa7, 0xf0, 0xe8, 0x23, 0xed, 0xb8, 0xa3, 0x27, 0x91, 0xed, 0x45, - 0x24, 0xed, 0x10, 0x76, 0x4b, 0x3e, 0xbe, 0xef, 0x97, 0xf0, 0x73, 0xaf, 0x4d, 0x55, 0x08, 0xcd, - 0xa4, 0x32, 0xa2, 0x4c, 0x32, 0x90, 0x2a, 0x4e, 0x81, 0xa5, 0x40, 0x8b, 0x12, 0x0d, 0xfa, 0x17, - 0x69, 0x95, 0x0b, 0xa5, 0x25, 0x2a, 0x7a, 0xd0, 0xe8, 0x9f, 0xcf, 0x70, 0x86, 0x75, 0x87, 0xd9, - 0x53, 0x53, 0xef, 0xdf, 0x36, 0xb0, 0x04, 0x75, 0x8e, 0x9a, 0x71, 0xd0, 0x82, 0x7d, 0x8c, 0xb9, - 0x30, 0x30, 0x66, 0x09, 0x4a, 0xd5, 0xb4, 0x86, 0x5f, 0xc4, 0x6d, 0x4f, 0xa1, 0x84, 0x5c, 0xfb, - 0x13, 0xd7, 0x4b, 0x50, 0x9b, 0xb8, 0x10, 0x65, 0xcc, 0x2b, 0x23, 0x7a, 0x64, 0x40, 0x46, 0xdd, - 0x87, 0x4b, 0xda, 0x20, 0xa8, 0x45, 0xd0, 0x3d, 0x82, 0x4e, 0x50, 0xaa, 0xb0, 0xb5, 0xfe, 0xb9, - 0x71, 0xa2, 0xae, 0x5d, 0x4d, 0x45, 0x19, 0x56, 0x46, 0xf8, 0x43, 0xd7, 0xcb, 0x61, 0x19, 0xf3, - 0x39, 0xf2, 0x58, 0xcb, 0x95, 0xe8, 0x1d, 0x0d, 0xc8, 0xc8, 0x8b, 0xba, 0x39, 0x2c, 0xc3, 0x39, - 0xf2, 0x37, 0xb9, 0x12, 0xfe, 0x9d, 0x7b, 0x96, 0x4a, 0x5d, 0x2c, 0x8c, 0xb0, 0x6f, 0x49, 0x4c, - 0x7b, 0xc7, 0x03, 0x32, 0x6a, 0x45, 0xde, 0x3e, 0x9d, 0xd6, 0xe1, 0xf0, 0xde, 0x3d, 0xb5, 0x93, - 0x17, 0x61, 0x20, 0x05, 0x03, 0xfe, 0x95, 0xdb, 0xa9, 0xb1, 0x19, 0xe8, 0xac, 0xfe, 0x5b, 0x27, - 0x3a, 0xb1, 0xc1, 0x33, 0xe8, 0x2c, 0x7c, 0x5d, 0x6f, 0x03, 0xb2, 0xd9, 0x06, 0xe4, 0x77, 0x1b, - 0x90, 0xcf, 0x5d, 0xe0, 0x6c, 0x76, 0x81, 0xf3, 0xbd, 0x0b, 0x9c, 0xf7, 0xa7, 0x99, 0x34, 0xd9, - 0x82, 0xd3, 0x04, 0x73, 0xf6, 0x6f, 0x70, 0x59, 0xad, 0xec, 0x45, 0x2a, 0xc3, 0x1a, 0x4d, 0x05, - 0x3f, 0xd4, 0xce, 0xdb, 0xb5, 0x9f, 0xc7, 
0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x87, 0xa2, 0x93, - 0xfe, 0x94, 0x01, 0x00, 0x00, + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x6e, 0x13, 0x31, + 0x10, 0x86, 0xb3, 0x34, 0x04, 0xea, 0x34, 0x1c, 0x56, 0x48, 0x5d, 0x5a, 0xb4, 0x8d, 0x02, 0x48, + 0x91, 0x90, 0xbc, 0x2a, 0x88, 0x2b, 0x87, 0xe4, 0x42, 0x0f, 0xa8, 0xd1, 0x72, 0xe3, 0xb2, 0xf2, + 0xae, 0x87, 0xac, 0x45, 0xec, 0x59, 0xad, 0xa7, 0x28, 0x9b, 0xa7, 0xe0, 0xc8, 0xe3, 0x70, 0xec, + 0xb1, 0x47, 0x4e, 0x08, 0x25, 0x2f, 0x82, 0x6c, 0x07, 0xaa, 0xdc, 0x3c, 0xbf, 0xbf, 0x99, 0xdf, + 0xf2, 0x3f, 0xec, 0x39, 0x75, 0x0d, 0xd8, 0x4c, 0x19, 0x82, 0xb6, 0xaa, 0x85, 0x32, 0x85, 0x14, + 0x99, 0x14, 0xbc, 0x69, 0x91, 0x30, 0x3e, 0x95, 0x9d, 0x06, 0x63, 0x15, 0x1a, 0x7e, 0x40, 0x9c, + 0x3d, 0x5d, 0xe2, 0x12, 0x3d, 0x93, 0xb9, 0x53, 0xc0, 0xcf, 0x5e, 0x86, 0x61, 0x15, 0x5a, 0x8d, + 0x36, 0x2b, 0x85, 0x85, 0xec, 0xdb, 0x65, 0x09, 0x24, 0x2e, 0xb3, 0x0a, 0x95, 0xd9, 0x53, 0x2f, + 0x02, 0x45, 0x60, 0x24, 0xb4, 0x5a, 0x19, 0xca, 0xaa, 0xb6, 0x6b, 0x08, 0xb3, 0xa6, 0x45, 0xfc, + 0x12, 0xa0, 0xc9, 0x8f, 0x88, 0x0d, 0x16, 0xa2, 0x15, 0xda, 0xc6, 0x73, 0x36, 0xaa, 0xd0, 0x52, + 0xd1, 0x40, 0x5b, 0x94, 0x1d, 0x41, 0x12, 0x8d, 0xa3, 0xe9, 0xf0, 0xcd, 0x33, 0x1e, 0x7c, 0xb8, + 0xf3, 0xe1, 0x7b, 0x1f, 0x3e, 0x47, 0x65, 0x66, 0xfd, 0xdb, 0xdf, 0x17, 0xbd, 0x7c, 0xe8, 0xba, + 0x16, 0xd0, 0xce, 0x3a, 0x82, 0x78, 0xc2, 0x46, 0x5a, 0xac, 0x8b, 0x72, 0x85, 0x65, 0x61, 0xd5, + 0x06, 0x92, 0x07, 0xe3, 0x68, 0x3a, 0xca, 0x87, 0x5a, 0xac, 0x67, 0x2b, 0x2c, 0x3f, 0xa9, 0x0d, + 0xc4, 0xaf, 0xd8, 0x13, 0xa9, 0x6c, 0x73, 0x43, 0xe0, 0xbc, 0x14, 0xca, 0xe4, 0x68, 0x1c, 0x4d, + 0xfb, 0xf9, 0x68, 0xaf, 0x2e, 0xbc, 0x38, 0x79, 0xcd, 0x4e, 0x5c, 0xcb, 0x47, 0x20, 0x21, 0x05, + 0x89, 0xf8, 0x9c, 0x1d, 0xfb, 0xb1, 0xb5, 0xb0, 0xb5, 0x7f, 0xdb, 0x71, 0xfe, 0xd8, 0x09, 0x1f, + 0x84, 0xad, 0x27, 0x3f, 0x23, 0xc6, 0xe6, 0xa8, 0xb5, 0x22, 0x0d, 0x86, 0x1c, 0x5b, 0xad, 0x14, + 
0x18, 0x2a, 0x94, 0xfc, 0xc7, 0x06, 0xe1, 0x4a, 0xc6, 0x17, 0x6c, 0x18, 0x06, 0x81, 0x5a, 0xd6, + 0xe4, 0x5f, 0xd8, 0xcf, 0x99, 0x1f, 0xe5, 0x95, 0x43, 0xa7, 0xa3, 0x43, 0xa7, 0xf8, 0x94, 0x3d, + 0xf2, 0x97, 0x4a, 0x26, 0x7d, 0xdf, 0x39, 0x70, 0xe5, 0x95, 0x8c, 0xdf, 0xb3, 0x13, 0x0d, 0xed, + 0xd7, 0x15, 0x14, 0xfe, 0x83, 0x93, 0x87, 0xfe, 0xfb, 0xce, 0xf9, 0x7d, 0x00, 0x3c, 0x04, 0xc0, + 0x17, 0xee, 0xfe, 0xba, 0xb1, 0xf9, 0x30, 0x34, 0xf8, 0x7a, 0x76, 0x7d, 0xbb, 0x4d, 0xa3, 0xbb, + 0x6d, 0x1a, 0xfd, 0xd9, 0xa6, 0xd1, 0xf7, 0x5d, 0xda, 0xbb, 0xdb, 0xa5, 0xbd, 0x5f, 0xbb, 0xb4, + 0xf7, 0xf9, 0xdd, 0x52, 0x51, 0x7d, 0x53, 0xf2, 0x0a, 0x75, 0xf6, 0x7f, 0x53, 0xd6, 0xdd, 0xc6, + 0x15, 0x2e, 0xd7, 0x10, 0x74, 0x53, 0x1e, 0xae, 0x57, 0x39, 0xf0, 0x11, 0xbf, 0xfd, 0x1b, 0x00, + 0x00, 0xff, 0xff, 0xa6, 0x3a, 0x37, 0x27, 0x7c, 0x02, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -240,6 +331,65 @@ func (m *BlobMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Commitment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Commitment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Commitment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MerkleProof != nil { + { + size, err := m.MerkleProof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDa(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.BlobId != 0 { + i = encodeVarintDa(dAtA, i, uint64(m.BlobId)) + i-- + dAtA[i] = 0x20 + } + if len(m.BlobHash) > 0 { + i -= len(m.BlobHash) + copy(dAtA[i:], m.BlobHash) + i = encodeVarintDa(dAtA, i, uint64(len(m.BlobHash))) + i-- + dAtA[i] = 0x1a + } + if m.BlobHeight != 0 { + i = 
encodeVarintDa(dAtA, i, uint64(m.BlobHeight)) + i-- + dAtA[i] = 0x10 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintDa(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintDa(dAtA []byte, offset int, v uint64) int { offset -= sovDa(v) base := offset @@ -281,6 +431,33 @@ func (m *BlobMetadata) Size() (n int) { return n } +func (m *Commitment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovDa(uint64(l)) + } + if m.BlobHeight != 0 { + n += 1 + sovDa(uint64(m.BlobHeight)) + } + l = len(m.BlobHash) + if l > 0 { + n += 1 + l + sovDa(uint64(l)) + } + if m.BlobId != 0 { + n += 1 + sovDa(uint64(m.BlobId)) + } + if m.MerkleProof != nil { + l = m.MerkleProof.Size() + n += 1 + l + sovDa(uint64(l)) + } + return n +} + func sovDa(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -490,6 +667,194 @@ func (m *BlobMetadata) Unmarshal(dAtA []byte) error { } return nil } +func (m *Commitment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Commitment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Commitment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDa + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDa + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDa + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlobHeight", wireType) + } + m.BlobHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlobHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlobHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDa + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDa + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BlobHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlobId", wireType) + } + m.BlobId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlobId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MerkleProof", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowDa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDa + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDa + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MerkleProof == nil { + m.MerkleProof = &crypto.ProofOps{} + } + if err := m.MerkleProof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDa(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipDa(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/types/pb/interchain_da/keys.go b/types/pb/interchain_da/keys.go new file mode 100644 index 000000000..bb98215fa --- /dev/null +++ b/types/pb/interchain_da/keys.go @@ -0,0 +1,51 @@ +package interchain_da + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func (m *MsgSubmitBlob) ValidateBasic() error { + // Tha validation occurs on the client side. + return nil +} + +func (m *MsgSubmitBlob) GetSigners() []sdk.AccAddress { + signer, _ := sdk.AccAddressFromBech32(m.Creator) + return []sdk.AccAddress{signer} +} + +type BlobID uint64 + +// Module name and store keys. 
+const ( + // ModuleName defines the module name + ModuleName = "interchain_da" + + ModuleNameCLI = "interchain-da" + + // StoreKey defines the primary module store key + StoreKey = ModuleName +) + +const ( + ParamsByte uint8 = iota + BlobIDByte + BlobMetadataByte + PruningHeightByte +) + +func ParamsPrefix() []byte { + return []byte{ParamsByte} +} + +func BlobIDPrefix() []byte { + return []byte{BlobIDByte} +} + +func BlobMetadataPrefix() []byte { + return []byte{BlobMetadataByte} +} + +func PruningHeightPrefix() []byte { + return []byte{PruningHeightByte} +}