diff --git a/bridge-history-api/internal/logic/history_logic.go b/bridge-history-api/internal/logic/history_logic.go index 0d97b6f0a1..ee52ce3ac5 100644 --- a/bridge-history-api/internal/logic/history_logic.go +++ b/bridge-history-api/internal/logic/history_logic.go @@ -407,7 +407,7 @@ func (h *HistoryLogic) cacheTxsInfo(ctx context.Context, cacheKey string, txs [] return err } } else { - // The transactions are sorted, thus we set the score as their indices. + // The transactions are sorted, thus we set the score as their index. for _, tx := range txs { txBytes, err := json.Marshal(tx) if err != nil { diff --git a/common/types/message/message.go b/common/types/message/message.go index 3c3bb23842..2b3f974056 100644 --- a/common/types/message/message.go +++ b/common/types/message/message.go @@ -234,7 +234,7 @@ type BatchProof struct { GitVersion string `json:"git_version,omitempty"` } -// SanityCheck checks whether an BatchProof is in a legal format +// SanityCheck checks whether a BatchProof is in a legal format func (ap *BatchProof) SanityCheck() error { if ap == nil { return errors.New("agg_proof is nil") @@ -243,23 +243,18 @@ func (ap *BatchProof) SanityCheck() error { if len(ap.Proof) == 0 { return errors.New("proof not ready") } + if len(ap.Proof)%32 != 0 { - return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof)) + return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof)) } if len(ap.Instances) == 0 { return errors.New("instance not ready") } - if len(ap.Instances)%32 != 0 { - return fmt.Errorf("instance buffer has wrong length, expected: 32, got: %d", len(ap.Instances)) - } if len(ap.Vk) == 0 { return errors.New("vk not ready") } - if len(ap.Vk)%32 != 0 { - return fmt.Errorf("vk buffer has wrong length, expected: 32, got: %d", len(ap.Vk)) - } return nil } @@ -272,3 +267,28 @@ type BundleProof struct { // cross-reference between cooridinator computation and prover compution GitVersion string `json:"git_version,omitempty"` } + +// SanityCheck checks whether a BundleProof is in a legal format +func (ap *BundleProof) SanityCheck() error { + if ap == nil { + return errors.New("agg_proof is nil") + } + + if len(ap.Proof) == 0 { + return errors.New("proof not ready") + } + + if len(ap.Proof)%32 != 0 { + return fmt.Errorf("proof buffer length must be a multiple of 32, got: %d", len(ap.Proof)) + } + + if len(ap.Instances) == 0 { + return errors.New("instance not ready") + } + + if len(ap.Vk) == 0 { + return errors.New("vk not ready") + } + + return nil +} diff --git a/coordinator/internal/orm/batch.go b/coordinator/internal/orm/batch.go index 0e70f97444..909b0d34ff 100644 --- a/coordinator/internal/orm/batch.go +++ b/coordinator/internal/orm/batch.go @@ -76,7 +76,7 @@ func (*Batch) TableName() string { } // GetUnassignedBatch retrieves unassigned batch based on the specified limit. -// The returned batch are sorted in ascending order by their index. +// The returned batches are sorted in ascending order by their index. func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { var batch Batch db := o.db.WithContext(ctx) @@ -93,7 +93,7 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, startChunkIndex, endChun } // GetAssignedBatch retrieves assigned batch based on the specified limit. -// The returned batch are sorted in ascending order by their index. +// The returned batches are sorted in ascending order by their index. 
func (o *Batch) GetAssignedBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) { var batch Batch db := o.db.WithContext(ctx) diff --git a/database/migrate/migrations/00021_bundle.sql b/database/migrate/migrations/00021_bundle.sql index 9e7dc91ff9..e42fcc58bc 100644 --- a/database/migrate/migrations/00021_bundle.sql +++ b/database/migrate/migrations/00021_bundle.sql @@ -3,11 +3,12 @@ CREATE TABLE bundle ( index BIGSERIAL PRIMARY KEY, - hash VARCHAR NOT NULL, -- Not part of DA hash, used for SQL query consistency and ease of use, derived using keccak256(concat(start_batch_hash, end_batch_hash)). + hash VARCHAR NOT NULL, -- Not part of DA hash, used for SQL query consistency and ease of use, derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)). start_batch_index BIGINT NOT NULL, end_batch_index BIGINT NOT NULL, start_batch_hash VARCHAR NOT NULL, end_batch_hash VARCHAR NOT NULL, + codec_version SMALLINT NOT NULL, -- proof batch_proofs_status SMALLINT NOT NULL DEFAULT 1, @@ -28,8 +29,13 @@ CREATE TABLE bundle ( deleted_at TIMESTAMP(0) DEFAULT NULL ); -CREATE INDEX bundle_start_batch_index_idx ON bundle (start_batch_index) WHERE deleted_at IS NULL; -CREATE INDEX bundle_end_batch_index_idx ON bundle (end_batch_index) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_index_rollup_status ON bundle(index, rollup_status) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_hash ON bundle(hash) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_hash_proving_status ON bundle(hash, proving_status) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_index_desc ON bundle(index DESC) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_batch_proofs_status ON bundle(batch_proofs_status) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_start_batch_index ON bundle(start_batch_index) WHERE deleted_at IS NULL; +CREATE INDEX idx_bundle_end_batch_index ON bundle(end_batch_index) WHERE deleted_at IS NULL; COMMENT ON COLUMN bundle.batch_proofs_status IS 'undefined, pending, ready'; COMMENT ON COLUMN bundle.proving_status IS 'undefined, unassigned, assigned, proved (deprecated), verified, failed'; diff --git a/database/migrate/migrations/00022_add_bundle_hash_and_codec_version_to_batch.sql b/database/migrate/migrations/00022_add_bundle_hash_and_codec_version_to_batch.sql new file mode 100644 index 0000000000..08a48fad6c --- /dev/null +++ b/database/migrate/migrations/00022_add_bundle_hash_and_codec_version_to_batch.sql @@ -0,0 +1,23 @@ +-- +goose Up +-- +goose StatementBegin + +ALTER TABLE batch +ADD COLUMN bundle_hash VARCHAR DEFAULT '', +ADD COLUMN codec_version SMALLINT DEFAULT 0; + +CREATE INDEX idx_batch_bundle_hash ON batch(bundle_hash); +CREATE INDEX idx_batch_index_codec_version ON batch(index, codec_version); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +DROP INDEX IF EXISTS idx_batch_bundle_hash; +DROP INDEX IF EXISTS idx_batch_index_codec_version; + +ALTER TABLE IF EXISTS batch +DROP COLUMN IF EXISTS bundle_hash, +DROP COLUMN IF EXISTS codec_version; + +-- +goose StatementEnd diff --git a/database/migrate/migrations/00022_add_bundle_hash_to_batch.sql b/database/migrate/migrations/00022_add_bundle_hash_to_batch.sql deleted file mode 100644 index 10f318003c..0000000000 --- a/database/migrate/migrations/00022_add_bundle_hash_to_batch.sql +++ /dev/null @@ -1,15 +0,0 @@ --- +goose Up --- +goose StatementBegin - -ALTER TABLE batch -ADD COLUMN bundle_hash VARCHAR DEFAULT ''; -- Adding bundle 
hash for SQL query consistency and ease of use - --- +goose StatementEnd - --- +goose Down --- +goose StatementBegin - -ALTER TABLE IF EXISTS batch -DROP COLUMN bundle_hash; - --- +goose StatementEnd diff --git a/rollup/abi/bridge_abi.go b/rollup/abi/bridge_abi.go index 8e58db048d..996655a7bf 100644 --- a/rollup/abi/bridge_abi.go +++ b/rollup/abi/bridge_abi.go @@ -66,7 +66,7 @@ func init() { // ScrollChainMetaData contains all meta data concerning the ScrollChain contract. var ScrollChainMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"oldMaxNumTxInChunk\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"newMaxNumTxInChunk\",\"type\":\"uint256\"}],\"name\":\"UpdateMaxNumTxInChunk\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"status\",\"type\":\"bool\"}],\"name\":\"UpdateProver\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bool\",\"name\":\"status\",\"type\":\"bool\"}],\"name\":\"UpdateSequencer\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"skippedL1MessageBitmap\",\"type\":\"bytes\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"skippedL1MessageBitmap\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"blobDataProof\",\"type\":\"bytes\"}],\"name\":\"commitBatchWithBlobProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"committedBatches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\
"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"finalizeBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blobDataProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatch4844\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"blobDataProof\",\"type\":\"bytes\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof4844\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBundleWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"finalizedStateRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"_stateRoot\",\"type\":\"bytes32\"}],\"name\":\"importGenesisBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"lastFinalizedBatchIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",
\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"withdrawRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": 
\"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": 
[],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", } // L1ScrollMessengerMetaData contains all meta data concerning the L1ScrollMessenger contract. diff --git a/rollup/conf/config.json b/rollup/conf/config.json index 7f2a996600..babe46b583 100644 --- a/rollup/conf/config.json +++ b/rollup/conf/config.json @@ -58,6 +58,7 @@ }, "enable_test_env_bypass_features": true, "finalize_batch_without_proof_timeout_sec": 7200, + "finalize_bundle_without_proof_timeout_sec": 7200, "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313", "commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414", "finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515", @@ -79,6 +80,10 @@ "batch_timeout_sec": 300, "gas_cost_increase_multiplier": 1.2, "max_uncompressed_batch_bytes_size": 634880 + }, + "bundle_proposer_config": { + "max_batch_num_per_bundle": 20, + "bundle_timeout_sec": 36000 } }, "db_config": { diff --git a/rollup/internal/config/l2.go b/rollup/internal/config/l2.go index 2b8cf6542c..593dfc2ff2 100644 --- a/rollup/internal/config/l2.go +++ b/rollup/internal/config/l2.go @@ -22,6 +22,8 @@ type L2Config struct { ChunkProposerConfig *ChunkProposerConfig `json:"chunk_proposer_config"` // The batch_proposer config BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"` + // The bundle_proposer config + BundleProposerConfig *BatchProposerConfig `json:"bundle_proposer_config"` } // ChunkProposerConfig loads chunk_proposer configuration items. @@ -44,3 +46,9 @@ type BatchProposerConfig struct { GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"` MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"` } + +// BundleProposerConfig loads bundle_proposer configuration items. +type BundleProposerConfig struct { + MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"` + BundleTimeoutSec uint64 `json:"bundle_timeout_sec"` +} diff --git a/rollup/internal/config/relayer.go b/rollup/internal/config/relayer.go index d33d8a83ca..aa1fbeea8b 100644 --- a/rollup/internal/config/relayer.go +++ b/rollup/internal/config/relayer.go @@ -66,6 +66,8 @@ type RelayerConfig struct { EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"` // The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true. FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"` + // The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true. + FinalizeBundleWithoutProofTimeoutSec uint64 `json:"finalize_bundle_without_proof_timeout_sec"` } // GasOracleConfig The config for updating gas price oracle. 
diff --git a/rollup/internal/controller/relayer/l2_relayer.go b/rollup/internal/controller/relayer/l2_relayer.go index 984bdd9674..ab0210120c 100644 --- a/rollup/internal/controller/relayer/l2_relayer.go +++ b/rollup/internal/controller/relayer/l2_relayer.go @@ -2,9 +2,11 @@ package relayer import ( "context" + "errors" "fmt" "math/big" "sort" + "strings" "time" "github.com/go-resty/resty/v2" @@ -42,6 +44,7 @@ type Layer2Relayer struct { l2Client *ethclient.Client db *gorm.DB + bundleOrm *orm.Bundle batchOrm *orm.Batch chunkOrm *orm.Chunk l2BlockOrm *orm.L2Block @@ -121,6 +124,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm. ctx: ctx, db: db, + bundleOrm: orm.NewBundle(db), batchOrm: orm.NewBatch(db), l2BlockOrm: orm.NewL2Block(db), chunkOrm: orm.NewChunk(db), @@ -414,14 +418,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() { "hash", dbBatch.Hash, "RollupContractAddress", r.cfg.RollupContractAddress, "err", err, - ) - log.Debug( - "Failed to send commitBatch tx to layer1", - "index", dbBatch.Index, - "hash", dbBatch.Hash, - "RollupContractAddress", r.cfg.RollupContractAddress, "calldata", common.Bytes2Hex(calldata), - "err", err, ) return } @@ -501,8 +498,49 @@ func (r *Layer2Relayer) ProcessCommittedBatches() { } } +// ProcessPendingBundles submits proof to layer 1 rollup contract +func (r *Layer2Relayer) ProcessPendingBundles() { + r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc() + + bundle, err := r.bundleOrm.GetFirstPendingBundle(r.ctx) + if err != nil { + log.Error("Failed to fetch first pending L2 bundle", "err", err) + return + } + status := types.ProvingStatus(bundle.ProvingStatus) + switch status { + case types.ProvingTaskUnassigned, types.ProvingTaskAssigned: + if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(bundle.CreatedAt) > time.Duration(r.cfg.FinalizeBundleWithoutProofTimeoutSec)*time.Second { + if err := r.finalizeBundle(bundle, false); err != nil { + log.Error("Failed to finalize timeout bundle without proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err) + } + } + + case types.ProvingTaskVerified: + log.Info("Start to roll up zk proof", "hash", bundle.Hash) + r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc() + if err := r.finalizeBundle(bundle, true); err != nil { + log.Error("Failed to finalize bundle with proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err) + } + + case types.ProvingTaskFailed: + // We were unable to prove this bundle. There are two possibilities: + // (a) Prover bug. In this case, we should fix and redeploy the prover. + // In the meantime, we continue to commit batches to L1 as well as + // proposing and proving chunks, batches and bundles. + // (b) Unprovable bundle, e.g. proof overflow. In this case we need to + // stop the ledger, fix the limit, revert all the violating blocks, + // chunks, batches, bundles and all subsequent ones, and resume, + // i.e. this case requires manual resolution. 
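+ // In either case the bundle's rollup status stays pending, so the error below fires again on each ProcessPendingBundles run until the bundle is dealt with.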
+ log.Error("bundle proving failed", "index", bundle.Index, "hash", bundle.Hash, "prover assigned at", bundle.ProverAssignedAt, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec) + + default: + log.Error("encounter unreachable case in ProcessPendingBundles", "proving status", status) + } +} + func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error { - // Check batch status before send `finalizeBatch` tx. + // Check batch status before sending `finalizeBatch` tx. if r.cfg.ChainMonitor.Enabled { var batchStatus bool batchStatus, err := r.getBatchStatusByIndex(dbBatch) @@ -513,8 +551,8 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error } if !batchStatus { r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc() - log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", dbBatch.Index) - return err + log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", dbBatch.Index) + return errors.New("the batch status is false") } } @@ -548,7 +586,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error if !r.chainCfg.IsBernoulli(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv0 calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof) if err != nil { - return fmt.Errorf("failed to construct commitBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err) + return fmt.Errorf("failed to construct finalizeBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err) } } else if !r.chainCfg.IsCurie(new(big.Int).SetUint64(dbChunks[0].StartBlockNumber)) { // codecv1 chunks := make([]*encoding.Chunk, len(dbChunks)) @@ -562,9 +600,9 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error calldata, err = r.constructFinalizeBatchPayloadCodecV1(dbBatch, dbParentBatch, dbChunks, chunks, aggProof) if err != nil { - return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err) + return fmt.Errorf("failed to construct finalizeBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err) } - } else { // codecv2 + } else if !r.chainCfg.IsDarwin(dbChunks[0].StartBlockTime) { // codecv2 chunks := make([]*encoding.Chunk, len(dbChunks)) for i, c := range dbChunks { blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber) @@ -576,8 +614,11 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error calldata, err = r.constructFinalizeBatchPayloadCodecV2(dbBatch, dbParentBatch, dbChunks, chunks, aggProof) if err != nil { - return fmt.Errorf("failed to construct commitBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err) + return fmt.Errorf("failed to construct finalizeBatch payload codecv2, index: %v, err: %w", dbBatch.Index, err) } + } else { // codecv3 + log.Debug("encoding is codecv3, using finalizeBundle instead", "index", dbBatch.Index) + return nil } txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0) @@ -589,15 +630,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error "hash", dbBatch.Hash, "RollupContractAddress", r.cfg.RollupContractAddress, "err", err, - ) - log.Debug( - "finalizeBatch in layer1 failed", - "with proof", withProof, - "index", dbBatch.Index, - "hash", dbBatch.Hash, - "RollupContractAddress", r.cfg.RollupContractAddress, "calldata", 
common.Bytes2Hex(calldata), - "err", err, ) return err } @@ -631,6 +664,98 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error return nil } +func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error { + // Check batch status before sending `finalizeBundle` tx. + if r.cfg.ChainMonitor.Enabled { + for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ { + tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex) + if getErr != nil { + log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr) + return getErr + } + batchStatus, getErr := r.getBatchStatusByIndex(tmpBatch) + if getErr != nil { + r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc() + log.Error("failed to get batch status, please check chain_monitor api server", "batch_index", tmpBatch.Index, "err", getErr) + return getErr + } + if !batchStatus { + r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc() + log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", tmpBatch.Index) + return errors.New("the batch status is false") + } + } + } + + dbBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.EndBatchIndex) + if err != nil { + log.Error("failed to get batch by index", "batch index", bundle.EndBatchIndex, "error", err) + return err + } + + var aggProof *message.BundleProof + if withProof { + aggProof, err = r.bundleOrm.GetVerifiedProofByHash(r.ctx, bundle.Hash) + if err != nil { + return fmt.Errorf("failed to get verified proof by bundle index: %d, err: %w", bundle.Index, err) + } + + if err = aggProof.SanityCheck(); err != nil { + return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", bundle.Index, err) + } + } + + calldata, err := r.constructFinalizeBundlePayloadCodecV3(dbBatch, aggProof) + if err != nil { + return fmt.Errorf("failed to construct finalizeBundle payload codecv3, index: %v, err: %w", dbBatch.Index, err) + } + + txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0) + if err != nil { + log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index, + "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, + "RollupContractAddress", r.cfg.RollupContractAddress, "err", err, "calldata", common.Bytes2Hex(calldata)) + return err + } + + log.Info("finalizeBundle in layer1", "with proof", withProof, "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "tx hash", txHash.String()) + + // Updating rollup status in database. + if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing); err != nil { + log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err) + return err + } + + // Updating the proving status when finalizing without proof, thus the coordinator could omit this task. 
+ // It isn't a necessary step, so we don't put it in the same transaction as UpdateFinalizeTxHashAndRollupStatus. + if !withProof { + txErr := r.db.Transaction(func(tx *gorm.DB) error { + if updateErr := r.bundleOrm.UpdateProvingStatus(r.ctx, bundle.Hash, types.ProvingTaskVerified); updateErr != nil { + return updateErr + } + if updateErr := r.batchOrm.UpdateProvingStatusByBundleHash(r.ctx, bundle.Hash, types.ProvingTaskVerified); updateErr != nil { + return updateErr + } + for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ { + tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex) + if getErr != nil { + return getErr + } + if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, tmpBatch.Hash, types.ProvingTaskVerified); updateErr != nil { + return updateErr + } + } + return nil + }) + if txErr != nil { + log.Error("failed to update chunk and batch proving status when finalizing without proof", "bundleHash", bundle.Hash, "err", txErr) + } + } + + r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal.Inc() + return nil +} + // batchStatusResponse the response schema type batchStatusResponse struct { ErrCode int `json:"errcode"` @@ -695,6 +820,36 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) { log.Warn("UpdateCommitTxHashAndRollupStatus failed", "confirmation", cfm, "err", err) } case types.SenderTypeFinalizeBatch: + if strings.HasPrefix(cfm.ContextID, "finalizeBundle-") { + bundleHash := strings.TrimPrefix(cfm.ContextID, "finalizeBundle-") + var status types.RollupStatus + if cfm.IsSuccessful { + status = types.RollupFinalized + r.metrics.rollupL2BundlesFinalizedConfirmedTotal.Inc() + } else { + status = types.RollupFinalizeFailed + r.metrics.rollupL2BundlesFinalizedConfirmedFailedTotal.Inc() + log.Warn("FinalizeBundleTxType transaction confirmed but failed in layer1", "confirmation", cfm) + } + + err := r.db.Transaction(func(dbTX *gorm.DB) error { + if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil { + log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "confirmation", cfm, "err", err) + return err + } + + if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundleHash, cfm.TxHash.String(), status); err != nil { + log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err) + return err + } + return nil + }) + if err != nil { + log.Warn("failed to update rollup status of bundle and batches", "err", err) + } + return + } + var status types.RollupStatus if cfm.IsSuccessful { status = types.RollupFinalized @@ -1007,6 +1162,34 @@ func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV2(dbBatch *orm.Batch, return calldata, nil } +func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV3(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) { + if aggProof != nil { // finalizeBundle with proof. + calldata, packErr := r.l1RollupABI.Pack( + "finalizeBundleWithProof", + dbBatch.BatchHeader, + common.HexToHash(dbBatch.StateRoot), + common.HexToHash(dbBatch.WithdrawRoot), + aggProof.Proof, + ) + if packErr != nil { + return nil, fmt.Errorf("failed to pack finalizeBundleWithProof: %w", packErr) + } + return calldata, nil + } + + // finalizeBundle without proof.
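+ // Note: this no-proof path is only reachable from the EnableTestEnvBypassFeatures timeout branch in ProcessPendingBundles; it packs the proof-less finalizeBundle entry added to ScrollChainMetaData above.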
+ calldata, packErr := r.l1RollupABI.Pack( + "finalizeBundle", + dbBatch.BatchHeader, + common.HexToHash(dbBatch.StateRoot), + common.HexToHash(dbBatch.WithdrawRoot), + ) + if packErr != nil { + return nil, fmt.Errorf("failed to pack finalizeBundle: %w", packErr) + } + return calldata, nil +} + // StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests. // for unit test func (r *Layer2Relayer) StopSenders() { diff --git a/rollup/internal/controller/relayer/l2_relayer_metrics.go b/rollup/internal/controller/relayer/l2_relayer_metrics.go index 0d03b69ad9..82b0f248f0 100644 --- a/rollup/internal/controller/relayer/l2_relayer_metrics.go +++ b/rollup/internal/controller/relayer/l2_relayer_metrics.go @@ -23,6 +23,11 @@ type l2RelayerMetrics struct { rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter rollupL2ChainMonitorLatestFailedCall prometheus.Counter rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter + rollupL2RelayerProcessPendingBundlesTotal prometheus.Counter + rollupL2RelayerProcessPendingBundlesFinalizedTotal prometheus.Counter + rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal prometheus.Counter + rollupL2BundlesFinalizedConfirmedTotal prometheus.Counter + rollupL2BundlesFinalizedConfirmedFailedTotal prometheus.Counter } var ( @@ -93,6 +98,26 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics { Name: "rollup_layer2_chain_monitor_latest_failed_batch_status", Help: "The total number of failed batch status get from chain_monitor", }), + rollupL2RelayerProcessPendingBundlesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_relayer_process_pending_bundles_total", + Help: "Total number of times the layer2 relayer has processed pending bundles.", + }), + rollupL2RelayerProcessPendingBundlesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_relayer_process_pending_bundles_finalized_total", + Help: "Total number of times the layer2 relayer has finalized proven bundle processes.", + }), + rollupL2RelayerProcessPendingBundlesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_relayer_process_pending_bundles_finalized_success_total", + Help: "Total number of times the layer2 relayer has successfully finalized proven bundle processes.", + }), + rollupL2BundlesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_bundles_finalized_confirmed_total", + Help: "Total number of finalized bundles confirmed on layer2.", + }), + rollupL2BundlesFinalizedConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_layer2_bundles_finalized_confirmed_failed_total", + Help: "Total number of failed confirmations for finalized bundles on layer2.", + }), } }) return l2RelayerMetric diff --git a/rollup/internal/controller/relayer/l2_relayer_test.go b/rollup/internal/controller/relayer/l2_relayer_test.go index c49135bf0e..c32a75d2e5 100644 --- a/rollup/internal/controller/relayer/l2_relayer_test.go +++ b/rollup/internal/controller/relayer/l2_relayer_test.go @@ -113,9 +113,13 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { defer database.CloseDB(db) l2Cfg := cfg.L2Config - chainConfig := &params.ChainConfig{} + var chainConfig *params.ChainConfig if codecVersion == encoding.CodecV0 { - chainConfig.BernoulliBlock = big.NewInt(0) + chainConfig = &params.ChainConfig{} + } else if codecVersion == 
encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) @@ -171,6 +175,66 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { } } +func testL2RelayerProcessPendingBundles(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV3} + for _, codecVersion := range codecVersions { + db := setupL2RelayerDB(t) + defer database.CloseDB(db) + + l2Cfg := cfg.L2Config + var chainConfig *params.ChainConfig + if codecVersion == encoding.CodecV3 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + } + relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) + assert.NoError(t, err) + + batch := &encoding.Batch{ + Index: 1, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk1, chunk2}, + } + + batchOrm := orm.NewBatch(db) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{}) + assert.NoError(t, err) + + bundleOrm := orm.NewBundle(db) + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion) + assert.NoError(t, err) + + err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending) + assert.NoError(t, err) + + err = bundleOrm.UpdateProvingStatus(context.Background(), bundle.Hash, types.ProvingTaskVerified) + assert.NoError(t, err) + + relayer.ProcessPendingBundles() + + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Equal(t, 1, len(bundles)) + // no valid proof, rollup status remains the same + assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus)) + + proof := &message.BundleProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600) + assert.NoError(t, err) + + relayer.ProcessPendingBundles() + bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Equal(t, 1, len(bundles)) + assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus)) + relayer.StopSenders() + } +} + func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2} for _, codecVersion := range codecVersions { @@ -180,9 +244,13 @@ l2Cfg := cfg.L2Config l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0 - chainConfig := &params.ChainConfig{} + var chainConfig *params.ChainConfig if codecVersion == 
encoding.CodecV0 { - chainConfig.BernoulliBlock = big.NewInt(0) + chainConfig = &params.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) @@ -213,29 +281,108 @@ err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil) assert.NoError(t, err) - // Check the database for the updated status using TryTimes. - ok := utils.TryTimes(5, func() bool { + assert.Eventually(t, func() bool { relayer.ProcessCommittedBatches() - time.Sleep(time.Second) batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0) if batchErr != nil { return false } + + batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing && + types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified + chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash) if chunkErr != nil { return false } - batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified - chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified && types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified return batchStatus && chunkStatus - }) - assert.True(t, ok) + }, 5*time.Second, 100*time.Millisecond, "Batch or Chunk status did not update as expected") relayer.StopSenders() } } +func testL2RelayerFinalizeTimeoutBundles(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV3} + for _, codecVersion := range codecVersions { + db := setupL2RelayerDB(t) + defer database.CloseDB(db) + + l2Cfg := cfg.L2Config + l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true + l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0 + var chainConfig *params.ChainConfig + if codecVersion == encoding.CodecV3 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + } + relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil) + assert.NoError(t, err) + + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + chunkOrm := orm.NewChunk(db) + chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{}) + assert.NoError(t, err) + chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{}) + assert.NoError(t, err) + + batch := &encoding.Batch{ + Index: 1, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk1, chunk2}, + } + + batchOrm := orm.NewBatch(db) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{}) + assert.NoError(t, err) + + err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) + assert.NoError(t, err)
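+ // Wire the fixtures together (chunks -> batch -> bundle) so the no-proof finalization below can cascade ProvingTaskVerified down the whole hierarchy.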
+ + err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil) + assert.NoError(t, err) + + bundleOrm := orm.NewBundle(db) + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion) + assert.NoError(t, err) + + err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil) + assert.NoError(t, err) + + assert.Eventually(t, func() bool { + relayer.ProcessPendingBundles() + + bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0) + if bundleErr != nil { + return false + } + + bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing && + types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified + + batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0) + if batchErr != nil { + return false + } + + batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified + + chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash) + if chunkErr != nil { + return false + } + + chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified && + types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified + + return bundleStatus && batchStatus && chunkStatus + }, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected") relayer.StopSenders() } } @@ -296,7 +443,7 @@ assert.True(t, ok) } -func testL2RelayerFinalizeConfirm(t *testing.T) { +func testL2RelayerFinalizeBatchConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer database.CloseDB(db) @@ -352,6 +499,75 @@ assert.True(t, ok) } +func testL2RelayerFinalizeBundleConfirm(t *testing.T) { + db := setupL2RelayerDB(t) + defer database.CloseDB(db) + + // Create and set up the Layer2 Relayer. + l2Cfg := cfg.L2Config + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil) + assert.NoError(t, err) + defer l2Relayer.StopSenders() + + // Simulate message confirmations.
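+ // One successful and one failed confirmation below exercise both the RollupFinalized and RollupFinalizeFailed transitions for a bundle and its batches.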
+ isSuccessful := []bool{true, false} + batchOrm := orm.NewBatch(db) + bundleOrm := orm.NewBundle(db) + batchHashes := make([]string, len(isSuccessful)) + bundleHashes := make([]string, len(isSuccessful)) + for i := range batchHashes { + batch := &encoding.Batch{ + Index: uint64(i + 1), + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk1, chunk2}, + } + + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{}) + assert.NoError(t, err) + batchHashes[i] = dbBatch.Hash + + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV3) + assert.NoError(t, err) + bundleHashes[i] = bundle.Hash + + err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash) + assert.NoError(t, err) + } + + for i, bundleHash := range bundleHashes { + l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{ + ContextID: "finalizeBundle-" + bundleHash, + IsSuccessful: isSuccessful[i], + TxHash: common.HexToHash("0x123456789abcdef"), + SenderType: types.SenderTypeFinalizeBatch, + }) + } + + assert.Eventually(t, func() bool { + expectedStatuses := []types.RollupStatus{ + types.RollupFinalized, + types.RollupFinalizeFailed, + } + + for i, bundleHash := range bundleHashes { + bundleInDB, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundleHash}, nil, 0) + if err != nil || len(bundleInDB) != 1 || types.RollupStatus(bundleInDB[0].RollupStatus) != expectedStatuses[i] { + return false + } + + batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHashes[i]}, nil, 0) + if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] { + return false + } + } + + return true + }, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected") +} + func testL2RelayerGasOracleConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer database.CloseDB(db) diff --git a/rollup/internal/controller/relayer/relayer_test.go b/rollup/internal/controller/relayer/relayer_test.go index fe093e52f8..2908133472 100644 --- a/rollup/internal/controller/relayer/relayer_test.go +++ b/rollup/internal/controller/relayer/relayer_test.go @@ -124,11 +124,15 @@ func TestFunctions(t *testing.T) { t.Run("TestCreateNewRelayer", testCreateNewRelayer) t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches) t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches) + t.Run("TestL2RelayerProcessPendingBundles", testL2RelayerProcessPendingBundles) t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches) + t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles) t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm) - t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm) + t.Run("TestL2RelayerFinalizeBatchConfirm", testL2RelayerFinalizeBatchConfirm) + t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm) t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm) t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle) + // test getBatchStatusByIndex t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex) } diff --git a/rollup/internal/controller/watcher/batch_proposer.go b/rollup/internal/controller/watcher/batch_proposer.go index 
76b8e03bf0..7d68cb7b5e 100644 --- a/rollup/internal/controller/watcher/batch_proposer.go +++ b/rollup/internal/controller/watcher/batch_proposer.go @@ -54,11 +54,12 @@ type BatchProposer struct { // NewBatchProposer creates a new BatchProposer instance. func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer { - log.Debug("new batch proposer", + log.Info("new batch proposer", "maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch, "maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch, "batchTimeoutSec", cfg.BatchTimeoutSec, "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier, + "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize) p := &BatchProposer{ diff --git a/rollup/internal/controller/watcher/batch_proposer_test.go b/rollup/internal/controller/watcher/batch_proposer_test.go index 77aa85e8c6..b86e13274d 100644 --- a/rollup/internal/controller/watcher/batch_proposer_test.go +++ b/rollup/internal/controller/watcher/batch_proposer_test.go @@ -26,7 +26,6 @@ func testBatchProposerCodecv0Limits(t *testing.T) { maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -133,10 +132,7 @@ MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, BatchTimeoutSec: tt.batchTimeoutSec, GasCostIncreaseMultiplier: 1.2, - }, &params.ChainConfig{ - HomesteadBlock: tt.forkBlock, - CurieBlock: big.NewInt(0), - }, db, nil) + }, &params.ChainConfig{}, db, nil) bp.TryProposeBatch() batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) @@ -167,7 +163,6 @@ func testBatchProposerCodecv1Limits(t *testing.T) { maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -278,7 +273,6 @@ GasCostIncreaseMultiplier: 1.2, }, &params.ChainConfig{ BernoulliBlock: big.NewInt(0), - HomesteadBlock: tt.forkBlock, }, db, nil) bp.TryProposeBatch() @@ -310,7 +304,6 @@ maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -425,7 +418,6 @@ }, &params.ChainConfig{ BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), - HomesteadBlock: tt.forkBlock, }, db, nil) bp.TryProposeBatch() @@ -457,7 +449,6 @@ maxL1CommitGas uint64 maxL1CommitCalldataSize uint64 batchTimeoutSec uint64 - forkBlock *big.Int expectedBatchesLen int expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 }{ @@ -553,7 +544,6 @@ BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), - HomesteadBlock: tt.forkBlock, }, db, nil) cp.TryProposeChunk() // chunk1 contains block1 cp.TryProposeChunk() // chunk2 contains block2 @@ -986,7 +976,7 @@ bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ 
MaxL1CommitGasPerBatch: math.MaxUint64, MaxL1CommitCalldataSizePerBatch: math.MaxUint64, - BatchTimeoutSec: math.MaxUint64, + BatchTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) @@ -1089,7 +1079,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: math.MaxUint64, MaxL1CommitCalldataSizePerBatch: math.MaxUint64, - BatchTimeoutSec: math.MaxUint64, + BatchTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) @@ -1145,7 +1135,7 @@ func testBatchProposerRespectHardforks(t *testing.T) { MaxL1CommitGasPerChunk: math.MaxUint64, MaxL1CommitCalldataSizePerChunk: math.MaxUint64, MaxRowConsumptionPerChunk: math.MaxUint64, - ChunkTimeoutSec: math.MaxUint64, + ChunkTimeoutSec: 0, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) @@ -1165,7 +1155,7 @@ func testBatchProposerRespectHardforks(t *testing.T) { bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: math.MaxUint64, MaxL1CommitCalldataSizePerBatch: math.MaxUint64, - BatchTimeoutSec: math.MaxUint64, + BatchTimeoutSec: 0, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) diff --git a/rollup/internal/controller/watcher/bundle_proposer.go b/rollup/internal/controller/watcher/bundle_proposer.go new file mode 100644 index 0000000000..d975ea3320 --- /dev/null +++ b/rollup/internal/controller/watcher/bundle_proposer.go @@ -0,0 +1,182 @@ +package watcher + +import ( + "context" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" + "gorm.io/gorm" + + "scroll-tech/common/forks" + + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/orm" +) + +// BundleProposer proposes bundles based on available unbundled batches. +type BundleProposer struct { + ctx context.Context + db *gorm.DB + + chunkOrm *orm.Chunk + batchOrm *orm.Batch + bundleOrm *orm.Bundle + + maxBatchNumPerBundle uint64 + bundleTimeoutSec uint64 + + chainCfg *params.ChainConfig + + bundleProposerCircleTotal prometheus.Counter + proposeBundleFailureTotal prometheus.Counter + proposeBundleUpdateInfoTotal prometheus.Counter + proposeBundleUpdateInfoFailureTotal prometheus.Counter + bundleBatchesNum prometheus.Gauge + bundleFirstBlockTimeoutReached prometheus.Counter + bundleBatchesProposeNotEnoughTotal prometheus.Counter +} + +// NewBundleProposer creates a new BundleProposer instance. 
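+// It wires the chunk, batch, and bundle ORMs to the given database handle and registers Prometheus metrics for bundle proposal attempts, failures, and sizes.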
+func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer { + log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec) + + p := &BundleProposer{ + ctx: ctx, + db: db, + chunkOrm: orm.NewChunk(db), + batchOrm: orm.NewBatch(db), + bundleOrm: orm.NewBundle(db), + maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle, + bundleTimeoutSec: cfg.BundleTimeoutSec, + chainCfg: chainCfg, + + bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_circle_total", + Help: "Total number of propose bundle attempts.", + }), + proposeBundleFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_failure_total", + Help: "Total number of propose bundle failures.", + }), + proposeBundleUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_update_info_total", + Help: "Total number of propose bundle update info attempts.", + }), + proposeBundleUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_update_info_failure_total", + Help: "Total number of propose bundle update info failures.", + }), + bundleBatchesNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Name: "rollup_propose_bundle_batches_number", + Help: "The number of batches in the current bundle.", + }), + bundleFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_first_block_timeout_reached_total", + Help: "Total times the first block in a bundle reached the timeout.", + }), + bundleBatchesProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "rollup_propose_bundle_batches_propose_not_enough_total", + Help: "Total number of times there were not enough batches to propose a bundle.", + }), + } + + return p +} + +// TryProposeBundle tries to propose a new bundle. 
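+// Errors are logged and counted in proposeBundleFailureTotal rather than returned to the caller.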
+func (p *BundleProposer) TryProposeBundle() { + p.bundleProposerCircleTotal.Inc() + if err := p.proposeBundle(); err != nil { + p.proposeBundleFailureTotal.Inc() + log.Error("propose new bundle failed", "err", err) + return + } +} + +func (p *BundleProposer) updateDBBundleInfo(batches []*orm.Batch, codecVersion encoding.CodecVersion) error { + if len(batches) == 0 { + return nil + } + + p.proposeBundleUpdateInfoTotal.Inc() + err := p.db.Transaction(func(dbTX *gorm.DB) error { + bundle, err := p.bundleOrm.InsertBundle(p.ctx, batches, codecVersion, dbTX) + if err != nil { + log.Warn("BundleProposer.InsertBundle failed", "err", err) + return err + } + if err := p.batchOrm.UpdateBundleHashInRange(p.ctx, bundle.StartBatchIndex, bundle.EndBatchIndex, bundle.Hash, dbTX); err != nil { + log.Error("failed to update bundle_hash for batches", "bundle hash", bundle.Hash, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err) + return err + } + return nil + }) + if err != nil { + p.proposeBundleUpdateInfoFailureTotal.Inc() + log.Error("update bundle info in orm failed", "err", err) + return err + } + return nil +} + +func (p *BundleProposer) proposeBundle() error { + firstUnbundledBatchIndex, err := p.bundleOrm.GetFirstUnbundledBatchIndex(p.ctx) + if err != nil { + return err + } + + // select at most maxBatchNumPerBundle batches + maxBatchesThisBundle := p.maxBatchNumPerBundle + batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, encoding.CodecV3, int(maxBatchesThisBundle)) + if err != nil { + return err + } + + if len(batches) == 0 { + return nil + } + + // Ensure all batches in the same bundle use the same hardfork name + // If a different hardfork name is found, truncate the batches slice at that point + firstChunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[0].StartChunkIndex) + if err != nil { + return err + } + hardforkName := forks.GetHardforkName(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime) + codecVersion := forks.GetCodecVersion(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime) + for i := 1; i < len(batches); i++ { + chunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[i].StartChunkIndex) + if err != nil { + return err + } + currentHardfork := forks.GetHardforkName(p.chainCfg, chunk.StartBlockNumber, chunk.StartBlockTime) + if currentHardfork != hardforkName { + batches = batches[:i] + maxBatchesThisBundle = uint64(i) // update maxBatchesThisBundle to trigger bundling, because these batches are the last batches before the hardfork + break + } + } + + if uint64(len(batches)) == maxBatchesThisBundle { + log.Info("reached maximum number of batches per bundle", "batch count", len(batches), "start batch index", batches[0].Index, "end batch index", batches[len(batches)-1].Index) + p.bundleBatchesNum.Set(float64(len(batches))) + return p.updateDBBundleInfo(batches, codecVersion) + } + + currentTimeSec := uint64(time.Now().Unix()) + if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec { + log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "current time", currentTimeSec) + p.bundleFirstBlockTimeoutReached.Inc() + p.bundleBatchesNum.Set(float64(len(batches))) + return p.updateDBBundleInfo(batches, codecVersion) + } + + log.Debug("pending batches are not enough and do not contain a timeout batch") +
p.bundleBatchesProposeNotEnoughTotal.Inc() + return nil +} diff --git a/rollup/internal/controller/watcher/bundle_proposer_test.go b/rollup/internal/controller/watcher/bundle_proposer_test.go new file mode 100644 index 0000000000..5b2e4458f5 --- /dev/null +++ b/rollup/internal/controller/watcher/bundle_proposer_test.go @@ -0,0 +1,226 @@ +package watcher + +import ( + "context" + "math" + "math/big" + "testing" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" + gethTypes "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/params" + "github.com/stretchr/testify/assert" + + "scroll-tech/common/database" + "scroll-tech/common/types" + + "scroll-tech/rollup/internal/config" + "scroll-tech/rollup/internal/orm" + "scroll-tech/rollup/internal/utils" +) + +func testBundleProposerLimits(t *testing.T) { + tests := []struct { + name string + maxBatchNumPerBundle uint64 + bundleTimeoutSec uint64 + expectedBundlesLen int + expectedBatchesInFirstBundle uint64 // only be checked when expectedBundlesLen > 0 + }{ + { + name: "NoLimitReached", + maxBatchNumPerBundle: math.MaxUint64, + bundleTimeoutSec: math.MaxUint32, + expectedBundlesLen: 0, + }, + { + name: "Timeout", + maxBatchNumPerBundle: math.MaxUint64, + bundleTimeoutSec: 0, + expectedBundlesLen: 1, + expectedBatchesInFirstBundle: 2, + }, + { + name: "maxBatchNumPerBundleIs0", + maxBatchNumPerBundle: 0, + bundleTimeoutSec: math.MaxUint32, + expectedBundlesLen: 0, + }, + { + name: "maxBatchNumPerBundleIs1", + maxBatchNumPerBundle: 1, + bundleTimeoutSec: math.MaxUint32, + expectedBundlesLen: 1, + expectedBatchesInFirstBundle: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + // Add genesis batch. 
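+ // Batch index 0 is reserved for the genesis batch, so the bundles proposed below are expected to start at batch index 1.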
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) + + chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: 1, + MaxTxNumPerChunk: math.MaxUint64, + MaxL1CommitGasPerChunk: math.MaxUint64, + MaxL1CommitCalldataSizePerChunk: math.MaxUint64, + MaxRowConsumptionPerChunk: math.MaxUint64, + ChunkTimeoutSec: math.MaxUint32, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: math.MaxUint64, + MaxL1CommitCalldataSizePerBatch: math.MaxUint64, + BatchTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + cp.TryProposeChunk() // chunk1 contains block1 + bap.TryProposeBatch() // batch1 contains chunk1 + cp.TryProposeChunk() // chunk2 contains block2 + bap.TryProposeBatch() // batch2 contains chunk2 + + bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: tt.maxBatchNumPerBundle, + BundleTimeoutSec: tt.bundleTimeoutSec, + }, chainConfig, db, nil) + + bup.TryProposeBundle() + + bundleOrm := orm.NewBundle(db) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, bundles, tt.expectedBundlesLen) + if tt.expectedBundlesLen > 0 { + assert.Equal(t, uint64(1), bundles[0].StartBatchIndex) + assert.Equal(t, tt.expectedBatchesInFirstBundle, bundles[0].EndBatchIndex) + assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(bundles[0].ProvingStatus)) + } + }) + } +} + +func testBundleProposerRespectHardforks(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) + + chainConfig := &params.ChainConfig{ + BernoulliBlock: big.NewInt(1), + CurieBlock: big.NewInt(2), + DarwinTime: func() *uint64 { t := uint64(4); return &t }(), + } + + // Add genesis batch.
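+ // As in testBundleProposerLimits, a genesis chunk and batch occupy index 0 before any real blocks are inserted.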
+ block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) + + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: math.MaxUint64, + MaxTxNumPerChunk: math.MaxUint64, + MaxL1CommitGasPerChunk: math.MaxUint64, + MaxL1CommitCalldataSizePerChunk: math.MaxUint64, + MaxRowConsumptionPerChunk: math.MaxUint64, + ChunkTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json") + for i := int64(1); i <= 60; i++ { + block.Header.Number = big.NewInt(i) + block.Header.Time = uint64(i) + err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block}) + assert.NoError(t, err) + } + + for i := 0; i < 5; i++ { + cp.TryProposeChunk() + } + + bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: math.MaxUint64, + MaxL1CommitCalldataSizePerBatch: math.MaxUint64, + BatchTimeoutSec: 0, + GasCostIncreaseMultiplier: 1, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, chainConfig, db, nil) + + for i := 0; i < 5; i++ { + bap.TryProposeBatch() + } + + bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: math.MaxUint64, + BundleTimeoutSec: 0, + }, chainConfig, db, nil) + + for i := 0; i < 5; i++ { + bup.TryProposeBundle() + } + + bundleOrm := orm.NewBundle(db) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, bundles, 1) + + expectedStartBatchIndices := []uint64{3} + expectedEndBatchIndices := []uint64{3} + for i, bundle := range bundles { + assert.Equal(t, expectedStartBatchIndices[i], bundle.StartBatchIndex) + assert.Equal(t, expectedEndBatchIndices[i], bundle.EndBatchIndex) + } +} diff --git a/rollup/internal/controller/watcher/chunk_proposer.go b/rollup/internal/controller/watcher/chunk_proposer.go index 574c24d42e..48abc80bbc 100644 --- a/rollup/internal/controller/watcher/chunk_proposer.go +++ b/rollup/internal/controller/watcher/chunk_proposer.go @@ -57,13 +57,15 @@ type ChunkProposer struct { // NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer { - log.Debug("new chunk proposer", + log.Info("new chunk proposer", + "maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk, "maxTxNumPerChunk", cfg.MaxTxNumPerChunk, "maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk, "maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk, "maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk, "chunkTimeoutSec", cfg.ChunkTimeoutSec, "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier, + "maxBlobSize", maxBlobSize, "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize) p := &ChunkProposer{ @@ -285,10 +287,9 @@ func (p *ChunkProposer) proposeChunk() error { currentTimeSec := uint64(time.Now().Unix()) if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk { log.Info("reached maximum number of blocks in chunk or first block timeout", - "start block number", chunk.Blocks[0].Header.Number, "block count", len(chunk.Blocks), - "block number", chunk.Blocks[0].Header.Number, - "block timestamp", metrics.FirstBlockTimestamp, + "start block number", chunk.Blocks[0].Header.Number, + "start block timestamp", metrics.FirstBlockTimestamp, "current time", currentTimeSec) p.chunkFirstBlockTimeoutReached.Inc() diff --git a/rollup/internal/controller/watcher/chunk_proposer_test.go b/rollup/internal/controller/watcher/chunk_proposer_test.go index 805810f42c..1a71827e6b 100644 --- a/rollup/internal/controller/watcher/chunk_proposer_test.go +++ b/rollup/internal/controller/watcher/chunk_proposer_test.go @@ -25,7 +25,6 @@ func testChunkProposerCodecv0Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 }{ @@ -195,7 +194,6 @@ func testChunkProposerCodecv1Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 }{ @@ -365,7 +363,6 @@ func testChunkProposerCodecv2Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 }{ @@ -536,7 +533,6 @@ func testChunkProposerCodecv3Limits(t *testing.T) { maxL1CommitCalldataSize uint64 maxRowConsumption uint64 chunkTimeoutSec uint64 - forkBlock *big.Int expectedChunksLen int expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0 }{ @@ -727,7 +723,7 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { MaxL1CommitGasPerChunk: math.MaxUint64, MaxL1CommitCalldataSizePerChunk: math.MaxUint64, MaxRowConsumptionPerChunk: math.MaxUint64, - ChunkTimeoutSec: math.MaxUint64, + ChunkTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) @@ -783,7 +779,7 @@ func testChunkProposerRespectHardforks(t *testing.T) { MaxL1CommitGasPerChunk: math.MaxUint64, MaxL1CommitCalldataSizePerChunk: math.MaxUint64, MaxRowConsumptionPerChunk: math.MaxUint64, - ChunkTimeoutSec: math.MaxUint64, + ChunkTimeoutSec: 0, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, }, &params.ChainConfig{ diff --git
a/rollup/internal/controller/watcher/watcher_test.go b/rollup/internal/controller/watcher/watcher_test.go index 806e55a504..23888abb49 100644 --- a/rollup/internal/controller/watcher/watcher_test.go +++ b/rollup/internal/controller/watcher/watcher_test.go @@ -124,6 +124,10 @@ func TestFunction(t *testing.T) { t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit) t.Run("TestBatchProposerMaxChunkNumPerBatchLimit", testBatchProposerMaxChunkNumPerBatchLimit) t.Run("TestBatchProposerRespectHardforks", testBatchProposerRespectHardforks) + + // Run bundle proposer test cases. + t.Run("TestBundleProposerLimits", testBundleProposerLimits) + t.Run("TestBundleProposerRespectHardforks", testBundleProposerRespectHardforks) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { diff --git a/rollup/internal/orm/batch.go b/rollup/internal/orm/batch.go index 0c0c00545a..ee1d8ba140 100644 --- a/rollup/internal/orm/batch.go +++ b/rollup/internal/orm/batch.go @@ -34,6 +34,7 @@ type Batch struct { WithdrawRoot string `json:"withdraw_root" gorm:"column:withdraw_root"` ParentBatchHash string `json:"parent_batch_hash" gorm:"column:parent_batch_hash"` BatchHeader []byte `json:"batch_header" gorm:"column:batch_header"` + CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"` // proof ChunkProofsStatus int16 `json:"chunk_proofs_status" gorm:"column:chunk_proofs_status;default:1"` @@ -58,6 +59,9 @@ type Batch struct { BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"` BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"` + // bundle + BundleHash string `json:"bundle_hash" gorm:"column:bundle_hash"` + // metadata TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"` TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"` @@ -157,6 +161,26 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error) return latestBatch.EndChunkIndex + 1, nil } +// GetBatchesGEIndexGECodecVersion retrieves batches whose batch index and codec version are both greater than or equal to the given values. +// The returned batches are sorted in ascending order by their index. +func (o *Batch) GetBatchesGEIndexGECodecVersion(ctx context.Context, index uint64, codecv encoding.CodecVersion, limit int) ([]*Batch, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("index >= ?", index) + db = db.Where("codec_version >= ?", codecv) + db = db.Order("index ASC") + + if limit > 0 { + db = db.Limit(limit) + } + + var batches []*Batch + if err := db.Find(&batches).Error; err != nil { + return nil, fmt.Errorf("Batch.GetBatchesGEIndexGECodecVersion error: %w", err) + } + return batches, nil +} + // GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) { if len(hashes) == 0 { @@ -264,6 +288,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer WithdrawRoot: batch.WithdrawRoot().Hex(), ParentBatchHash: batch.ParentBatchHash.Hex(), BatchHeader: batchMeta.BatchBytes, + CodecVersion: int16(codecVersion), ChunkProofsStatus: int16(types.ChunkProofsStatusPending), ProvingStatus: int16(types.ProvingTaskUnassigned), RollupStatus: int16(types.RollupPending), @@ -391,7 +416,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st db = db.Where("hash", hash) if err := db.Updates(updateFields).Error; err != nil { - return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, commitTxHash: %v", err, hash, status.String(), finalizeTxHash) + return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatus error: %w, batch hash: %v, status: %v, finalizeTxHash: %v", err, hash, status.String(), finalizeTxHash) } return nil } @@ -417,3 +442,73 @@ func (o *Batch) UpdateProofByHash(ctx context.Context, hash string, proof *messa } return nil } + +// UpdateBundleHashInRange updates the bundle_hash for batches within the specified range (inclusive). +// The range is closed, i.e., it includes both start and end indices. +func (o *Batch) UpdateBundleHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, bundleHash string, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("index >= ? AND index <= ?", startIndex, endIndex) + + if err := db.Update("bundle_hash", bundleHash).Error; err != nil { + return fmt.Errorf("Batch.UpdateBundleHashInRange error: %w, start index: %v, end index: %v, bundle hash: %v", err, startIndex, endIndex, bundleHash) + } + return nil +} + +// UpdateProvingStatusByBundleHash updates the proving_status for batches with the specified bundle_hash. +func (o *Batch) UpdateProvingStatusByBundleHash(ctx context.Context, bundleHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskAssigned: + updateFields["prover_assigned_at"] = time.Now() + case types.ProvingTaskUnassigned: + updateFields["prover_assigned_at"] = nil + case types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("bundle_hash = ?", bundleHash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateProvingStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String()) + } + return nil +} + +// UpdateFinalizeTxHashAndRollupStatusByBundleHash updates the finalize transaction hash and rollup status for batches with the specified bundle_hash. +func (o *Batch) UpdateFinalizeTxHashAndRollupStatusByBundleHash(ctx context.Context, bundleHash string, finalizeTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["finalize_tx_hash"] = finalizeTxHash + updateFields["rollup_status"] = int(status) + + switch status { + case types.RollupFinalized: + updateFields["finalized_at"] = utils.NowUTC() + } + + db := o.db + if
len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Batch{}) + db = db.Where("bundle_hash = ?", bundleHash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Batch.UpdateFinalizeTxHashAndRollupStatusByBundleHash error: %w, bundle hash: %v, status: %v", err, bundleHash, status.String()) + } + return nil +} diff --git a/rollup/internal/orm/bundle.go b/rollup/internal/orm/bundle.go new file mode 100644 index 0000000000..afd861729d --- /dev/null +++ b/rollup/internal/orm/bundle.go @@ -0,0 +1,282 @@ +package orm + +import ( + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto" + "gorm.io/gorm" + + "scroll-tech/common/types" + "scroll-tech/common/types/message" + "scroll-tech/common/utils" +) + +// Bundle represents a bundle of batches. +type Bundle struct { + db *gorm.DB `gorm:"column:-"` + + // bundle + Index uint64 `json:"index" gorm:"column:index;primaryKey"` + Hash string `json:"hash" gorm:"column:hash"` + StartBatchIndex uint64 `json:"start_batch_index" gorm:"column:start_batch_index"` + EndBatchIndex uint64 `json:"end_batch_index" gorm:"column:end_batch_index"` + StartBatchHash string `json:"start_batch_hash" gorm:"column:start_batch_hash"` + EndBatchHash string `json:"end_batch_hash" gorm:"column:end_batch_hash"` + CodecVersion int16 `json:"codec_version" gorm:"column:codec_version"` + + // proof + BatchProofsStatus int16 `json:"batch_proofs_status" gorm:"column:batch_proofs_status;default:1"` + ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:1"` + Proof []byte `json:"proof" gorm:"column:proof;default:NULL"` + ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"` + ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"` + ProofTimeSec int32 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:NULL"` + + // rollup + RollupStatus int16 `json:"rollup_status" gorm:"column:rollup_status;default:1"` + FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"` + FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"` + + // metadata + CreatedAt time.Time `json:"created_at" gorm:"column:created_at"` + UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"` + DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"` +} + +// NewBundle creates a new Bundle database instance. +func NewBundle(db *gorm.DB) *Bundle { + return &Bundle{db: db} +} + +// TableName returns the table name for the Bundle model. +func (*Bundle) TableName() string { + return "bundle" +} + +// getLatestBundle retrieves the latest bundle from the database. +func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Order("index desc") + + var latestBundle Bundle + if err := db.First(&latestBundle).Error; err != nil { + if err == gorm.ErrRecordNotFound { + return nil, nil + } + return nil, fmt.Errorf("getLatestBundle error: %w", err) + } + return &latestBundle, nil +} + +// GetBundles retrieves selected bundles from the database. +// The returned bundles are sorted in ascending order by their index. +// only used in unit tests. 
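+// Extra field filters are applied as WHERE equality conditions, and results are always ordered by index ascending after any caller-supplied ordering.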
+func (o *Bundle) GetBundles(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + + for key, value := range fields { + db = db.Where(key, value) + } + + for _, orderBy := range orderByList { + db = db.Order(orderBy) + } + + if limit > 0 { + db = db.Limit(limit) + } + + db = db.Order("index ASC") + + var bundles []*Bundle + if err := db.Find(&bundles).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetBundles error: %w, fields: %v, orderByList: %v", err, fields, orderByList) + } + return bundles, nil +} + +// GetFirstUnbundledBatchIndex retrieves the first unbundled batch index. +func (o *Bundle) GetFirstUnbundledBatchIndex(ctx context.Context) (uint64, error) { + // Get the latest bundle + latestBundle, err := o.getLatestBundle(ctx) + if err != nil { + return 0, fmt.Errorf("Bundle.GetFirstUnbundledBatchIndex error: %w", err) + } + if latestBundle == nil { + return 0, nil + } + return latestBundle.EndBatchIndex + 1, nil +} + +// GetFirstPendingBundle retrieves the first pending bundle from the database. +func (o *Bundle) GetFirstPendingBundle(ctx context.Context) (*Bundle, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("rollup_status = ?", types.RollupPending) + db = db.Order("index asc") + + var pendingBundle Bundle + if err := db.First(&pendingBundle).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetFirstPendingBundle error: %w", err) + } + return &pendingBundle, nil +} + +// GetVerifiedProofByHash retrieves the verified aggregate proof for a bundle with the given hash. +func (o *Bundle) GetVerifiedProofByHash(ctx context.Context, hash string) (*message.BundleProof, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Select("proof") + db = db.Where("hash = ? AND proving_status = ?", hash, types.ProvingTaskVerified) + + var bundle Bundle + if err := db.Find(&bundle).Error; err != nil { + return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash) + } + + var proof message.BundleProof + if err := json.Unmarshal(bundle.Proof, &proof); err != nil { + return nil, fmt.Errorf("Bundle.GetVerifiedProofByHash error: %w, bundle hash: %v", err, hash) + } + return &proof, nil +} + +// InsertBundle inserts a new bundle into the database. +// Assuming input batches are ordered by index. +func (o *Bundle) InsertBundle(ctx context.Context, batches []*Batch, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Bundle, error) { + if len(batches) == 0 { + return nil, errors.New("Bundle.InsertBundle error: no batches provided") + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + + newBundle := Bundle{ + StartBatchHash: batches[0].Hash, + StartBatchIndex: batches[0].Index, + EndBatchHash: batches[len(batches)-1].Hash, + EndBatchIndex: batches[len(batches)-1].Index, + BatchProofsStatus: int16(types.BatchProofsStatusPending), + ProvingStatus: int16(types.ProvingTaskUnassigned), + RollupStatus: int16(types.RollupPending), + CodecVersion: int16(codecVersion), + } + + // Not part of DA hash, used for SQL query consistency and ease of use. + // Derived using keccak256(concat(start_batch_hash_bytes, end_batch_hash_bytes)). 
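+ // The "0x" prefixes are stripped before hex-decoding, so the digest covers the two raw 32-byte batch hashes.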
+ newBundle.Hash = hex.EncodeToString(crypto.Keccak256(append(common.Hex2Bytes(newBundle.StartBatchHash[2:]), common.Hex2Bytes(newBundle.EndBatchHash[2:])...))) + + if err := db.Create(&newBundle).Error; err != nil { + return nil, fmt.Errorf("Bundle.InsertBundle Create error: %w, bundle hash: %v", err, newBundle.Hash) + } + + return &newBundle, nil +} + +// UpdateFinalizeTxHashAndRollupStatus updates the finalize transaction hash and rollup status for a bundle. +func (o *Bundle) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash string, finalizeTxHash string, status types.RollupStatus) error { + updateFields := make(map[string]interface{}) + updateFields["finalize_tx_hash"] = finalizeTxHash + updateFields["rollup_status"] = int(status) + if status == types.RollupFinalized { + updateFields["finalized_at"] = time.Now() + } + + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateFinalizeTxHashAndRollupStatus error: %w, bundle hash: %v, status: %v, finalizeTxHash: %v", err, hash, status.String(), finalizeTxHash) + } + return nil +} + +// UpdateProvingStatus updates the proving status of a bundle. +func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskAssigned: + updateFields["prover_assigned_at"] = time.Now() + case types.ProvingTaskUnassigned: + updateFields["prover_assigned_at"] = nil + case types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateProvingStatus error: %w, bundle hash: %v, status: %v", err, hash, status.String()) + } + return nil +} + +// UpdateRollupStatus updates the rollup status for a bundle. +// only used in unit tests. +func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status types.RollupStatus) error { + updateFields := make(map[string]interface{}) + updateFields["rollup_status"] = int(status) + if status == types.RollupFinalized { + updateFields["finalized_at"] = time.Now() + } + + db := o.db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateRollupStatus error: %w, bundle hash: %v, status: %v", err, hash, status.String()) + } + return nil +} + +// UpdateProofAndProvingStatusByHash updates the bundle proof and proving status by hash. +// only used in unit tests. 
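+// The proof is stored as JSON, and proved_at is stamped with the current UTC time regardless of the given proving status.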
+func (o *Bundle) UpdateProofAndProvingStatusByHash(ctx context.Context, hash string, proof *message.BundleProof, provingStatus types.ProvingStatus, proofTimeSec uint64, dbTX ...*gorm.DB) error { + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + + proofBytes, err := json.Marshal(proof) + if err != nil { + return err + } + + updateFields := make(map[string]interface{}) + updateFields["proof"] = proofBytes + updateFields["proving_status"] = provingStatus + updateFields["proof_time_sec"] = proofTimeSec + updateFields["proved_at"] = utils.NowUTC() + + db = db.WithContext(ctx) + db = db.Model(&Bundle{}) + db = db.Where("hash", hash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Bundle.UpdateProofAndProvingStatusByHash error: %w, bundle hash: %v", err, hash) + } + return nil +} diff --git a/rollup/internal/orm/orm_test.go b/rollup/internal/orm/orm_test.go index f37f169c48..1f93486d3c 100644 --- a/rollup/internal/orm/orm_test.go +++ b/rollup/internal/orm/orm_test.go @@ -10,6 +10,8 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/da-codec/encoding/codecv0" "github.com/scroll-tech/da-codec/encoding/codecv1" + "github.com/scroll-tech/da-codec/encoding/codecv2" + "github.com/scroll-tech/da-codec/encoding/codecv3" "github.com/scroll-tech/go-ethereum/common" gethTypes "github.com/scroll-tech/go-ethereum/core/types" "github.com/stretchr/testify/assert" @@ -17,6 +19,7 @@ import ( "scroll-tech/common/testcontainers" "scroll-tech/common/types" + "scroll-tech/common/types/message" "scroll-tech/database/migrate" "scroll-tech/rollup/internal/utils" @@ -29,6 +32,7 @@ var ( l2BlockOrm *L2Block chunkOrm *Chunk batchOrm *Batch + bundleOrm *Bundle pendingTransactionOrm *PendingTransaction block1 *encoding.Block @@ -59,6 +63,7 @@ func setupEnv(t *testing.T) { assert.NoError(t, err) assert.NoError(t, migrate.ResetDB(sqlDB)) + bundleOrm = NewBundle(db) batchOrm = NewBatch(db) chunkOrm = NewChunk(db) l2BlockOrm = NewL2Block(db) @@ -165,7 +170,7 @@ func TestL2BlockOrm(t *testing.T) { } func TestChunkOrm(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} for _, codecVersion := range codecVersions { @@ -184,7 +189,7 @@ func TestChunkOrm(t *testing.T) { assert.NoError(t, createErr) chunkHash2, err = daChunk2.Hash() assert.NoError(t, err) - } else { + } else if codecVersion == encoding.CodecV1 { daChunk1, createErr := codecv1.NewDAChunk(chunk1, 0) assert.NoError(t, createErr) chunkHash1, err = daChunk1.Hash() @@ -194,6 +199,26 @@ func TestChunkOrm(t *testing.T) { assert.NoError(t, createErr) chunkHash2, err = daChunk2.Hash() assert.NoError(t, err) + } else if codecVersion == encoding.CodecV2 { + daChunk1, createErr := codecv2.NewDAChunk(chunk1, 0) + assert.NoError(t, createErr) + chunkHash1, err = daChunk1.Hash() + assert.NoError(t, err) + + daChunk2, createErr := codecv2.NewDAChunk(chunk2, chunk1.NumL1Messages(0)) + assert.NoError(t, createErr) + chunkHash2, err = daChunk2.Hash() + assert.NoError(t, err) + } else { + daChunk1, createErr := codecv3.NewDAChunk(chunk1, 0) + assert.NoError(t, createErr) + chunkHash1, err = daChunk1.Hash() + assert.NoError(t, err) + + daChunk2, createErr := codecv3.NewDAChunk(chunk2, chunk1.NumL1Messages(0)) + assert.NoError(t, 
createErr) + chunkHash2, err = daChunk2.Hash() + assert.NoError(t, err) } dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, utils.ChunkMetrics{}) @@ -238,7 +263,7 @@ func TestChunkOrm(t *testing.T) { } func TestBatchOrm(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} for _, codecVersion := range codecVersions { @@ -247,10 +272,8 @@ func TestBatchOrm(t *testing.T) { assert.NoError(t, migrate.ResetDB(sqlDB)) batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk1}, + Index: 0, + Chunks: []*encoding.Chunk{chunk1}, } batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{}) assert.NoError(t, err) @@ -264,18 +287,24 @@ func TestBatchOrm(t *testing.T) { daBatch1, createErr := codecv0.NewDABatchFromBytes(batch1.BatchHeader) assert.NoError(t, createErr) batchHash1 = daBatch1.Hash().Hex() - } else { + } else if codecVersion == encoding.CodecV1 { daBatch1, createErr := codecv1.NewDABatchFromBytes(batch1.BatchHeader) assert.NoError(t, createErr) batchHash1 = daBatch1.Hash().Hex() + } else if codecVersion == encoding.CodecV2 { + daBatch1, createErr := codecv2.NewDABatchFromBytes(batch1.BatchHeader) + assert.NoError(t, createErr) + batchHash1 = daBatch1.Hash().Hex() + } else { + daBatch1, createErr := codecv3.NewDABatchFromBytes(batch1.BatchHeader) + assert.NoError(t, createErr) + batchHash1 = daBatch1.Hash().Hex() } assert.Equal(t, hash1, batchHash1) batch = &encoding.Batch{ - Index: 1, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk2}, + Index: 1, + Chunks: []*encoding.Chunk{chunk2}, } batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, utils.BatchMetrics{}) assert.NoError(t, err) @@ -351,9 +380,195 @@ func TestBatchOrm(t *testing.T) { assert.NotNil(t, updatedBatch) assert.Equal(t, "finalizeTxHash", updatedBatch.FinalizeTxHash) assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(updatedBatch.RollupStatus)) + + batches, err := batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0) + assert.NoError(t, err) + assert.Equal(t, 2, len(batches)) + assert.Equal(t, batchHash1, batches[0].Hash) + assert.Equal(t, batchHash2, batches[1].Hash) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 1) + assert.NoError(t, err) + assert.Equal(t, 1, len(batches)) + assert.Equal(t, batchHash1, batches[0].Hash) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 1, codecVersion, 0) + assert.NoError(t, err) + assert.Equal(t, 1, len(batches)) + assert.Equal(t, batchHash2, batches[0].Hash) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion+1, 0) + assert.NoError(t, err) + assert.Equal(t, 0, len(batches)) + + err = batchOrm.UpdateBundleHashInRange(context.Background(), 0, 0, "test hash") + assert.NoError(t, err) + + err = batchOrm.UpdateProvingStatusByBundleHash(context.Background(), "test hash", types.ProvingTaskFailed) + assert.NoError(t, err) + + err = batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(context.Background(), "test hash", "tx 
hash", types.RollupCommitFailed) + assert.NoError(t, err) + + batches, err = batchOrm.GetBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0) + assert.NoError(t, err) + assert.Equal(t, 2, len(batches)) + assert.Equal(t, batchHash1, batches[0].Hash) + assert.Equal(t, batchHash2, batches[1].Hash) + assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus)) + assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(batches[1].ProvingStatus)) + assert.Equal(t, types.RollupFinalizeFailed, types.RollupStatus(batches[1].RollupStatus)) } } +func TestBundleOrm(t *testing.T) { + sqlDB, err := db.DB() + assert.NoError(t, err) + assert.NoError(t, migrate.ResetDB(sqlDB)) + + chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} + batch1 := &encoding.Batch{ + Index: 0, + Chunks: []*encoding.Chunk{chunk1}, + } + dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV3, utils.BatchMetrics{}) + assert.NoError(t, err) + + chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} + batch2 := &encoding.Batch{ + Index: 1, + Chunks: []*encoding.Chunk{chunk2}, + } + dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV3, utils.BatchMetrics{}) + assert.NoError(t, err) + + var bundle1 *Bundle + var bundle2 *Bundle + + t.Run("InsertBundle", func(t *testing.T) { + bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV3) + assert.NoError(t, err) + assert.NotNil(t, bundle1) + assert.Equal(t, uint64(0), bundle1.StartBatchIndex) + assert.Equal(t, uint64(0), bundle1.EndBatchIndex) + assert.Equal(t, dbBatch1.Hash, bundle1.StartBatchHash) + assert.Equal(t, dbBatch1.Hash, bundle1.EndBatchHash) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle1.CodecVersion)) + + bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV3) + assert.NoError(t, err) + assert.NotNil(t, bundle2) + assert.Equal(t, uint64(1), bundle2.StartBatchIndex) + assert.Equal(t, uint64(1), bundle2.EndBatchIndex) + assert.Equal(t, dbBatch2.Hash, bundle2.StartBatchHash) + assert.Equal(t, dbBatch2.Hash, bundle2.EndBatchHash) + assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle2.CodecVersion)) + }) + + t.Run("GetFirstUnbundledBatchIndex", func(t *testing.T) { + index, err := bundleOrm.GetFirstUnbundledBatchIndex(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(2), index) + }) + + t.Run("GetFirstPendingBundle", func(t *testing.T) { + bundle, err := bundleOrm.GetFirstPendingBundle(context.Background()) + assert.NoError(t, err) + assert.NotNil(t, bundle) + assert.Equal(t, int16(types.RollupPending), bundle.RollupStatus) + }) + + t.Run("UpdateFinalizeTxHashAndRollupStatus", func(t *testing.T) { + err := bundleOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), bundle1.Hash, "0xabcd", types.RollupFinalized) + assert.NoError(t, err) + + pendingBundle, err := bundleOrm.GetFirstPendingBundle(context.Background()) + assert.NoError(t, err) + assert.Equal(t, uint64(2), pendingBundle.Index) + + var finalizedBundle Bundle + err = db.Where("hash = ?", bundle1.Hash).First(&finalizedBundle).Error + assert.NoError(t, err) + assert.Equal(t, "0xabcd", finalizedBundle.FinalizeTxHash) + assert.Equal(t, int16(types.RollupFinalized), finalizedBundle.RollupStatus) + assert.NotNil(t, finalizedBundle.FinalizedAt) + }) + + 
t.Run("UpdateProvingStatus", func(t *testing.T) { + err := bundleOrm.UpdateProvingStatus(context.Background(), bundle1.Hash, types.ProvingTaskAssigned) + assert.NoError(t, err) + + var bundle Bundle + err = db.Where("hash = ?", bundle1.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, int16(types.ProvingTaskAssigned), bundle.ProvingStatus) + assert.NotNil(t, bundle.ProverAssignedAt) + + err = bundleOrm.UpdateProvingStatus(context.Background(), bundle1.Hash, types.ProvingTaskVerified) + assert.NoError(t, err) + + err = db.Where("hash = ?", bundle1.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, int16(types.ProvingTaskVerified), bundle.ProvingStatus) + assert.NotNil(t, bundle.ProvedAt) + }) + + t.Run("GetVerifiedProofByHash", func(t *testing.T) { + proof := &message.BundleProof{ + Proof: []byte("test proof"), + } + proofBytes, err := json.Marshal(proof) + assert.NoError(t, err) + + err = db.Model(&Bundle{}).Where("hash = ?", bundle1.Hash).Update("proof", proofBytes).Error + assert.NoError(t, err) + + retrievedProof, err := bundleOrm.GetVerifiedProofByHash(context.Background(), bundle1.Hash) + assert.NoError(t, err) + assert.Equal(t, proof.Proof, retrievedProof.Proof) + }) + + t.Run("GetBundles", func(t *testing.T) { + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Equal(t, 2, len(bundles)) + assert.Equal(t, bundle1.Hash, bundles[0].Hash) + assert.Equal(t, bundle2.Hash, bundles[1].Hash) + }) + + t.Run("UpdateProofAndProvingStatusByHash", func(t *testing.T) { + proof := &message.BundleProof{ + Proof: []byte("new test proof"), + } + err := bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle2.Hash, proof, types.ProvingTaskVerified, 600) + assert.NoError(t, err) + + var bundle Bundle + err = db.Where("hash = ?", bundle2.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(bundle.ProvingStatus)) + assert.Equal(t, int32(600), bundle.ProofTimeSec) + assert.NotNil(t, bundle.ProvedAt) + + var retrievedProof message.BundleProof + err = json.Unmarshal(bundle.Proof, &retrievedProof) + assert.NoError(t, err) + assert.Equal(t, proof.Proof, retrievedProof.Proof) + }) + + t.Run("UpdateRollupStatus", func(t *testing.T) { + err := bundleOrm.UpdateRollupStatus(context.Background(), bundle2.Hash, types.RollupFinalized) + assert.NoError(t, err) + + var bundle Bundle + err = db.Where("hash = ?", bundle2.Hash).First(&bundle).Error + assert.NoError(t, err) + assert.Equal(t, types.RollupFinalized, types.RollupStatus(bundle.RollupStatus)) + assert.NotNil(t, bundle.FinalizedAt) + }) +} + func TestPendingTransactionOrm(t *testing.T) { sqlDB, err := db.DB() assert.NoError(t, err) diff --git a/rollup/mock_bridge/MockBridge.sol b/rollup/mock_bridge/MockBridge.sol index f9dc85201b..c94bc221b3 100644 --- a/rollup/mock_bridge/MockBridge.sol +++ b/rollup/mock_bridge/MockBridge.sol @@ -1,12 +1,17 @@ -// SPDX-License-Identifier: UNLICENSED +// SPDX-License-Identifier: MIT pragma solidity ^0.8.24; import {BatchHeaderV0Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV0Codec.sol"; import {BatchHeaderV1Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV1Codec.sol"; +import {BatchHeaderV3Codec} from "../../../scroll-contracts/src/libraries/codec/BatchHeaderV3Codec.sol"; import {ChunkCodecV0} from "../../../scroll-contracts/src/libraries/codec/ChunkCodecV0.sol"; import {ChunkCodecV1} 
from "../../../scroll-contracts/src/libraries/codec/ChunkCodecV1.sol"; contract MockBridge { + /********** + * Errors * + **********/ + /// @dev Thrown when committing a committed batch. error ErrorBatchIsAlreadyCommitted(); @@ -20,7 +25,7 @@ contract MockBridge { error ErrorCallPointEvaluationPrecompileFailed(); /// @dev Thrown when the transaction has multiple blobs. - error ErrorFoundMultipleBlob(); + error ErrorFoundMultipleBlobs(); /// @dev Thrown when some fields are not zero in genesis batch. error ErrorGenesisBatchHasNonZeroField(); @@ -43,11 +48,8 @@ contract MockBridge { /// @dev Thrown when the batch index is incorrect. error ErrorIncorrectBatchIndex(); - /// @dev Thrown when the previous state root doesn't match stored one. - error ErrorIncorrectPreviousStateRoot(); - - /// @dev Thrown when the batch header version is invalid. - error ErrorInvalidBatchHeaderVersion(); + /// @dev Thrown when the batch version is incorrect. + error ErrorIncorrectBatchVersion(); /// @dev Thrown when no blob found in the transaction. error ErrorNoBlobFound(); @@ -55,9 +57,6 @@ contract MockBridge { /// @dev Thrown when the number of transactions is less than number of L1 message in one block. error ErrorNumTxsLessThanNumL1Msgs(); - /// @dev Thrown when the given previous state is zero. - error ErrorPreviousStateRootIsZero(); - /// @dev Thrown when the given state root is zero. error ErrorStateRootIsZero(); @@ -70,24 +69,37 @@ contract MockBridge { event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash); event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot); - struct L2MessageProof { - uint256 batchIndex; - bytes merkleProof; - } + /************* + * Constants * + *************/ /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification. - address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); + address internal constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the /// point evaluation precompile - uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513; + uint256 internal constant BLS_MODULUS = + 52435875175126190479447740508185965837690552500527637822603658699938581184513; + + /// @notice The chain id of the corresponding layer 2 chain. + uint64 public immutable layer2ChainId; + + /************* + * Variables * + *************/ + + /// @notice The maximum number of transactions allowed in each chunk. + uint256 public maxNumTxInChunk; uint256 public l1BaseFee; uint256 public l1BlobBaseFee; uint256 public l2BaseFee; uint256 public lastFinalizedBatchIndex; + mapping(uint256 => bytes32) public committedBatches; + mapping(uint256 => bytes32) public finalizedStateRoots; + mapping(uint256 => bytes32) public withdrawRoots; function setL1BaseFee(uint256 _l1BaseFee) external { @@ -108,6 +120,8 @@ contract MockBridge { *****************************/ /// @notice Import layer 2 genesis block + /// @param _batchHeader The header of the genesis batch. + /// @param _stateRoot The state root of the genesis block. 
function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external { // check genesis batch header length if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero(); @@ -141,16 +155,10 @@ contract MockBridge { bytes[] memory _chunks, bytes calldata ) external { - // check whether the batch is empty - if (_chunks.length == 0) revert ErrorBatchIsEmpty(); - - (, bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _loadBatchHeader( - _parentBatchHeader + (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( + _parentBatchHeader, + _chunks ); - unchecked { - _batchIndex += 1; - } - if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted(); bytes32 _batchHash; uint256 batchPtr; @@ -166,7 +174,7 @@ contract MockBridge { _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch) } // store entries, the order matters - BatchHeaderV0Codec.storeVersion(batchPtr, _version); + BatchHeaderV0Codec.storeVersion(batchPtr, 0); BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); @@ -177,9 +185,10 @@ contract MockBridge { batchPtr, BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH ); - } else { - bytes32 blobVersionedHash; - (blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1( + } else if (_version <= 2) { + // versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec, + // but they use different blob encoding and different verifiers. + (_dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1( _totalL1MessagesPoppedOverall, _chunks ); @@ -187,56 +196,125 @@ contract MockBridge { batchPtr := mload(0x40) _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch) } + // store entries, the order matters - BatchHeaderV1Codec.storeVersion(batchPtr, _version); - BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex); - BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); - BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); - BatchHeaderV1Codec.storeDataHash(batchPtr, _dataHash); - BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, blobVersionedHash); + // Some fields are stored using `BatchHeaderV0Codec`; see the comments of `BatchHeaderV1Codec`. + BatchHeaderV0Codec.storeVersion(batchPtr, _version); + BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); + BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); + BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); + BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); + BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _getBlobVersionedHash()); BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); - // compute batch hash - _batchHash = BatchHeaderV1Codec.computeBatchHash( + // compute batch hash, V1 and V2 have the same code as V0 + _batchHash = BatchHeaderV0Codec.computeBatchHash( batchPtr, BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH ); + } else { + revert ErrorIncorrectBatchVersion(); } - committedBatches[_batchIndex] = _batchHash; - emit CommitBatch(_batchIndex, _batchHash); + _afterCommitBatch(_batchIndex, _batchHash); + } + + /// @dev This function will revert unless all V0/V1/V2 batches are finalized.
This is because we start to + /// pop L1 messages in `commitBatchWithBlobProof` but not in `commitBatch`. We also introduce `finalizedQueueIndex` + /// in `L1MessageQueue`. If one of the V0/V1/V2 batches is not finalized, `L1MessageQueue.pendingQueueIndex` will not + /// match `parentBatchHeader.totalL1MessagePopped` and thus revert. + function commitBatchWithBlobProof( + uint8 _version, + bytes calldata _parentBatchHeader, + bytes[] memory _chunks, + bytes calldata, + bytes calldata _blobDataProof + ) external { + if (_version <= 2) { + revert ErrorIncorrectBatchVersion(); + } + + // allocate memory of batch header and store entries if necessary, the order matters + // @note Why store entries only if necessary? To avoid stack overflow problems. + // The codes for `version`, `batchIndex`, `l1MessagePopped`, `totalL1MessagePopped` and `dataHash` + // are the same as `BatchHeaderV0Codec`. + // The codes for `blobVersionedHash`, and `parentBatchHash` are the same as `BatchHeaderV1Codec`. + uint256 batchPtr; + assembly { + batchPtr := mload(0x40) + // This is `BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH`, use `193` here to reduce code + // complexity. Be careful that the length may change in future versions. + mstore(0x40, add(batchPtr, 193)) + } + BatchHeaderV0Codec.storeVersion(batchPtr, _version); + + (bytes32 _parentBatchHash, uint256 _batchIndex, uint256 _totalL1MessagesPoppedOverall) = _beforeCommitBatch( + _parentBatchHeader, + _chunks + ); + BatchHeaderV0Codec.storeBatchIndex(batchPtr, _batchIndex); + + // versions 2 and 3 both use ChunkCodecV1 + (bytes32 _dataHash, uint256 _totalL1MessagesPoppedInBatch) = _commitChunksV1( + _totalL1MessagesPoppedOverall, + _chunks + ); + unchecked { + _totalL1MessagesPoppedOverall += _totalL1MessagesPoppedInBatch; + } + + BatchHeaderV0Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch); + BatchHeaderV0Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall); + BatchHeaderV0Codec.storeDataHash(batchPtr, _dataHash); + + // verify blob versioned hash + bytes32 _blobVersionedHash = _getBlobVersionedHash(); + _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); + BatchHeaderV1Codec.storeBlobVersionedHash(batchPtr, _blobVersionedHash); + BatchHeaderV1Codec.storeParentBatchHash(batchPtr, _parentBatchHash); + + uint256 lastBlockTimestamp; + { + bytes memory lastChunk = _chunks[_chunks.length - 1]; + lastBlockTimestamp = ChunkCodecV1.getLastBlockTimestamp(lastChunk); + } + BatchHeaderV3Codec.storeLastBlockTimestamp(batchPtr, lastBlockTimestamp); + BatchHeaderV3Codec.storeBlobDataProof(batchPtr, _blobDataProof); + + // compute batch hash, V3 has the same code as V0 + bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash( + batchPtr, + BatchHeaderV3Codec.BATCH_HEADER_FIXED_LENGTH + ); + + _afterCommitBatch(_batchIndex, _batchHash); } /// @dev We keep this function to upgrade to 4844 more smoothly. function finalizeBatchWithProof( bytes calldata _batchHeader, - bytes32 _prevStateRoot, + bytes32, /*_prevStateRoot*/ bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes calldata ) external { - if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero(); - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute batch hash and verify - (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); - - // verify previous state root.
/// @dev We keep this function to upgrade to 4844 more smoothly. function finalizeBatchWithProof( bytes calldata _batchHeader, - bytes32 _prevStateRoot, + bytes32, /*_prevStateRoot*/ bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes calldata ) external { - if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero(); - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute batch hash and verify - (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); - - // verify previous state root. - if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot(); - - // avoid duplicated verification - if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified(); + (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( + _batchHeader, + _postStateRoot + ); - // check and update lastFinalizedBatchIndex - unchecked { - if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex(); - lastFinalizedBatchIndex = _batchIndex; + // compute public input hash + bytes32 _publicInputHash; + { + bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr); + bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1]; + _publicInputHash = keccak256( + abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash) + ); } - // record state root and withdraw root - finalizedStateRoots[_batchIndex] = _postStateRoot; - withdrawRoots[_batchIndex] = _withdrawRoot; - - emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + // Pop finalized and non-skipped messages from L1MessageQueue. + uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); + _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); } /// @dev Memory layout of `_blobDataProof`: @@ -247,37 +325,140 @@ /// ``` function finalizeBatchWithProof4844( bytes calldata _batchHeader, - bytes32 _prevStateRoot, + bytes32, bytes32 _postStateRoot, bytes32 _withdrawRoot, bytes calldata _blobDataProof, bytes calldata ) external { - if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero(); - if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); - - // compute batch hash and verify - (uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); - bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr); + (uint256 batchPtr, bytes32 _batchHash, uint256 _batchIndex) = _beforeFinalizeBatch( + _batchHeader, + _postStateRoot + ); - // Calls the point evaluation precompile and verifies the output + // compute public input hash + bytes32 _publicInputHash; { - (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall( - abi.encodePacked(_blobVersionedHash, _blobDataProof) + bytes32 _dataHash = BatchHeaderV0Codec.getDataHash(batchPtr); + bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(batchPtr); + bytes32 _prevStateRoot = finalizedStateRoots[_batchIndex - 1]; + // verify blob versioned hash + _checkBlobVersionedHash(_blobVersionedHash, _blobDataProof); + _publicInputHash = keccak256( + abi.encodePacked( + layer2ChainId, + _prevStateRoot, + _postStateRoot, + _withdrawRoot, + _dataHash, + _blobDataProof[0:64], + _blobVersionedHash + ) ); - // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the - // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile - if (!success) revert ErrorCallPointEvaluationPrecompileFailed(); - (, uint256 result) = abi.decode(data, (uint256, uint256)); - if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput(); } - // verify previous state root. - if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot(); + // Pop finalized and non-skipped messages from L1MessageQueue. + uint256 _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); + _afterFinalizeBatch(_totalL1MessagesPoppedOverall, _batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + }
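The 4844 variant's public input hash packs seven fields in a fixed order. A Go sketch of that packing (helper name is ours; we assume `layer2ChainId` is a `uint64` as in `ScrollChain`, so `abi.encodePacked` contributes 8 bytes for it):

```go
package mockbridge

import (
	"encoding/binary"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// PublicInputHash4844 mirrors the abi.encodePacked layout used in
// finalizeBatchWithProof4844: chainId || prevStateRoot || postStateRoot ||
// withdrawRoot || dataHash || blobDataProof[0:64] || blobVersionedHash.
func PublicInputHash4844(
	chainID uint64,
	prevStateRoot, postStateRoot, withdrawRoot, dataHash, blobVersionedHash common.Hash,
	blobDataProof []byte, // first 64 bytes carry the KZG challenge z and evaluation y
) common.Hash {
	var chainIDBytes [8]byte
	binary.BigEndian.PutUint64(chainIDBytes[:], chainID)
	return common.BytesToHash(crypto.Keccak256(
		chainIDBytes[:],
		prevStateRoot[:],
		postStateRoot[:],
		withdrawRoot[:],
		dataHash[:],
		blobDataProof[:64],
		blobVersionedHash[:],
	))
}
```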
+ + function finalizeBundleWithProof( + bytes calldata _batchHeader, + bytes32 _postStateRoot, + bytes32 _withdrawRoot, + bytes calldata + ) external { + if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + + // retrieve finalized state root and batch hash from storage + uint256 _finalizedBatchIndex = lastFinalizedBatchIndex; + + // compute pending batch hash and verify + (, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader); + if (_batchIndex <= _finalizedBatchIndex) revert ErrorBatchIsAlreadyVerified(); + + // store in state + // @note we do not store intermediate finalized roots + lastFinalizedBatchIndex = _batchIndex; + finalizedStateRoots[_batchIndex] = _postStateRoot; + withdrawRoots[_batchIndex] = _withdrawRoot; + + emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); + } + + /********************** + * Internal Functions * + **********************/ + + /// @dev Internal function to do common checks before actual batch committing. + /// @param _parentBatchHeader The parent batch header in calldata. + /// @param _chunks The list of chunks in memory. + /// @return _parentBatchHash The batch hash of parent batch header. + /// @return _batchIndex The index of current batch. + /// @return _totalL1MessagesPoppedOverall The total number of L1 messages popped before current batch. + function _beforeCommitBatch(bytes calldata _parentBatchHeader, bytes[] memory _chunks) + private + view + returns ( + bytes32 _parentBatchHash, + uint256 _batchIndex, + uint256 _totalL1MessagesPoppedOverall + ) + { + // check whether the batch is empty + if (_chunks.length == 0) revert ErrorBatchIsEmpty(); + (, _parentBatchHash, _batchIndex, _totalL1MessagesPoppedOverall) = _loadBatchHeader(_parentBatchHeader); + unchecked { + _batchIndex += 1; + } + if (committedBatches[_batchIndex] != 0) revert ErrorBatchIsAlreadyCommitted(); + } + + /// @dev Internal function to do common checks after actual batch committing. + /// @param _batchIndex The index of current batch. + /// @param _batchHash The hash of current batch. + function _afterCommitBatch(uint256 _batchIndex, bytes32 _batchHash) private { + committedBatches[_batchIndex] = _batchHash; + emit CommitBatch(_batchIndex, _batchHash); + } + + /// @dev Internal function to do common checks before actual batch finalization. + /// @param _batchHeader The current batch header in calldata. + /// @param _postStateRoot The state root after current batch. + /// @return batchPtr The start memory offset of current batch in memory. + /// @return _batchHash The hash of current batch. + /// @return _batchIndex The index of current batch. + function _beforeFinalizeBatch(bytes calldata _batchHeader, bytes32 _postStateRoot) + internal + view + returns ( + uint256 batchPtr, + bytes32 _batchHash, + uint256 _batchIndex + ) + { + if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero(); + + // compute batch hash and verify + (batchPtr, _batchHash, _batchIndex, ) = _loadBatchHeader(_batchHeader); // avoid duplicated verification if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified(); + }
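Note how `finalizeBundleWithProof` differs from per-batch finalization: it only requires the bundle's last batch index to lie beyond the last finalized one, and it records roots solely at that index. A toy Go model of that rule (types simplified, names ours):

```go
package mockbridge

import "errors"

// mockRollup models only the storage touched by finalizeBundleWithProof.
type mockRollup struct {
	lastFinalizedBatchIndex uint64
	finalizedStateRoots     map[uint64][32]byte
	withdrawRoots           map[uint64][32]byte
}

func newMockRollup() *mockRollup {
	return &mockRollup{
		finalizedStateRoots: make(map[uint64][32]byte),
		withdrawRoots:       make(map[uint64][32]byte),
	}
}

// finalizeBundle mirrors the checks above: a non-zero post state root, a
// batch index strictly beyond the last finalized one, and storage of the
// roots only at the bundle's last batch index.
func (m *mockRollup) finalizeBundle(batchIndex uint64, postStateRoot, withdrawRoot [32]byte) error {
	if postStateRoot == ([32]byte{}) {
		return errors.New("state root is zero")
	}
	if batchIndex <= m.lastFinalizedBatchIndex {
		return errors.New("batch is already verified")
	}
	// intermediate finalized roots are intentionally not stored
	m.lastFinalizedBatchIndex = batchIndex
	m.finalizedStateRoots[batchIndex] = postStateRoot
	m.withdrawRoots[batchIndex] = withdrawRoot
	return nil
}
```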
+ /// @dev Internal function to do common checks after actual batch finalization. + /// @param _totalL1MessagesPoppedOverall The total number of L1 messages popped after current batch (unnamed and unused in this mock). + /// @param _batchIndex The index of current batch. + /// @param _batchHash The hash of current batch. + /// @param _postStateRoot The state root after current batch. + /// @param _withdrawRoot The withdraw trie root after current batch. + function _afterFinalizeBatch( + uint256, + uint256 _batchIndex, + bytes32 _batchHash, + bytes32 _postStateRoot, + bytes32 _withdrawRoot + ) internal { // check and update lastFinalizedBatchIndex unchecked { if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex(); @@ -291,19 +472,43 @@ contract MockBridge { emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot); } - /********************** - * Internal Functions * - **********************/ + /// @dev Internal function to check blob versioned hash. + /// @param _blobVersionedHash The blob versioned hash to check. + /// @param _blobDataProof The blob data proof used to verify the blob versioned hash. + function _checkBlobVersionedHash(bytes32 _blobVersionedHash, bytes calldata _blobDataProof) internal view { + // Calls the point evaluation precompile and verifies the output + (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall( + abi.encodePacked(_blobVersionedHash, _blobDataProof) + ); + // We verify that the point evaluation precompile call was successful by checking that the latter 32 bytes of the + // response are equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile + if (!success) revert ErrorCallPointEvaluationPrecompileFailed(); + (, uint256 result) = abi.decode(data, (uint256, uint256)); + if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput(); + } + + /// @dev Internal function to get the blob versioned hash. + /// @return _blobVersionedHash The retrieved blob versioned hash. + function _getBlobVersionedHash() internal virtual returns (bytes32 _blobVersionedHash) { + bytes32 _secondBlob; + // Get blob's versioned hash + assembly { + _blobVersionedHash := blobhash(0) + _secondBlob := blobhash(1) + } + if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound(); + if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlobs(); + }
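For context on `_checkBlobVersionedHash`: per EIP-4844, the point evaluation precompile consumes a 192-byte input, and on success returns two 32-byte words, `FIELD_ELEMENTS_PER_BLOB` and `BLS_MODULUS`, which is why the contract compares the second decoded word against `BLS_MODULUS`. A sketch of the input layout (helper name ours):

```go
package mockbridge

// PointEvaluationInput assembles the 192-byte input the EIP-4844 point
// evaluation precompile (address 0x0A) expects, matching the contract's
// abi.encodePacked(_blobVersionedHash, _blobDataProof):
//   versionedHash (32) || z (32) || y (32) || commitment (48) || proof (48)
func PointEvaluationInput(versionedHash, z, y [32]byte, commitment, proof [48]byte) []byte {
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, z[:]...)          // evaluation point
	input = append(input, y[:]...)          // claimed evaluation value
	input = append(input, commitment[:]...) // KZG commitment
	input = append(input, proof[:]...)      // KZG proof
	return input
}
```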
/// @dev Internal function to commit chunks with version 0 /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks. /// @param _chunks The list of chunks to commit. /// @return _batchDataHash The computed data hash for the list of chunks. - /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one. + /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones. function _commitChunksV0( uint256 _totalL1MessagesPoppedOverall, bytes[] memory _chunks - ) internal pure returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { + ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { uint256 _chunksLength = _chunks.length; // load `batchDataHashPtr` and reserve the memory region for chunk data hashes @@ -341,32 +546,12 @@ contract MockBridge { /// @dev Internal function to commit chunks with version 1 /// @param _totalL1MessagesPoppedOverall The number of L1 messages popped before the list of chunks. /// @param _chunks The list of chunks to commit. - /// @return _blobVersionedHash The blob versioned hash for the blob carried in this transaction. /// @return _batchDataHash The computed data hash for the list of chunks. - /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages poped in this batch, including skipped one. + /// @return _totalL1MessagesPoppedInBatch The total number of L1 messages popped in this batch, including skipped ones. function _commitChunksV1( uint256 _totalL1MessagesPoppedOverall, bytes[] memory _chunks - ) - internal - view - returns ( - bytes32 _blobVersionedHash, - bytes32 _batchDataHash, - uint256 _totalL1MessagesPoppedInBatch - ) - { - { - bytes32 _secondBlob; - // Get blob's versioned hash - assembly { - _blobVersionedHash := blobhash(0) - _secondBlob := blobhash(1) - } - if (_blobVersionedHash == bytes32(0)) revert ErrorNoBlobFound(); - if (_secondBlob != bytes32(0)) revert ErrorFoundMultipleBlob(); - } - + ) internal view returns (bytes32 _batchDataHash, uint256 _totalL1MessagesPoppedInBatch) { uint256 _chunksLength = _chunks.length; // load `batchDataHashPtr` and reserve the memory region for chunk data hashes @@ -424,22 +609,25 @@ contract MockBridge { version := shr(248, calldataload(_batchHeader.offset)) } - // version should be always 0 or 1 in current code uint256 _length; if (version == 0) { (batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader); - _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length); - _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr); - } else { + } else if (version <= 2) { (batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader); - _batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length); - _batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr); + } else if (version >= 3) { + (batchPtr, _length) = BatchHeaderV3Codec.loadAndValidate(_batchHeader); } + + // the code for computing the batch hash is the same for V0, V1, V2 and V3, + // as are the getters for `_batchIndex` and `_totalL1MessagesPoppedOverall`. + _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length); + _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr); + _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); + // only check when genesis is imported if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) { revert ErrorIncorrectBatchHash(); } - _totalL1MessagesPoppedOverall = BatchHeaderV0Codec.getTotalL1MessagePopped(batchPtr); }
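The reworked `_loadBatchHeader` reduces to a per-version load followed by the shared V0 accessors. A compact Go sketch of the version dispatch (the V0/V1 fixed lengths are our assumption from the codec layouts; only `193` is confirmed above):

```go
package batchheader

import "fmt"

// FixedHeaderLen mirrors the version dispatch in _loadBatchHeader.
func FixedHeaderLen(version uint8) int {
	switch {
	case version == 0:
		return 89 // BatchHeaderV0Codec, followed by a dynamic skipped-L1-message bitmap
	case version <= 2:
		return 121 // BatchHeaderV1Codec; V1 and V2 share the layout
	default:
		return 193 // BatchHeaderV3Codec
	}
}

// PrintFixedLens shows the expected fixed length for each known version.
func PrintFixedLens() {
	for _, v := range []uint8{0, 1, 2, 3} {
		fmt.Printf("version %d: %d bytes\n", v, FixedHeaderLen(v))
	}
}
```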
/// @dev Internal function to commit a chunk with version 0. @@ -452,7 +640,7 @@ contract MockBridge { bytes memory _chunk, uint256 _totalL1MessagesPoppedInBatch, uint256 _totalL1MessagesPoppedOverall - ) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { + ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { uint256 chunkPtr; uint256 startDataPtr; uint256 dataPtr; @@ -481,6 +669,8 @@ contract MockBridge { } } + // Used to compute the actual number of transactions in the chunk. + uint256 txHashStartDataPtr = dataPtr; // concatenate tx hashes uint256 l2TxPtr = ChunkCodecV0.getL2TxPtr(chunkPtr, _numBlocks); chunkPtr += 1; @@ -510,6 +700,9 @@ contract MockBridge { } } + // check the actual number of transactions in the chunk + if ((dataPtr - txHashStartDataPtr) / 32 > maxNumTxInChunk) revert ErrorTooManyTxsInOneChunk(); + assembly { chunkPtr := add(_chunk, 0x20) } @@ -532,7 +725,7 @@ contract MockBridge { bytes memory _chunk, uint256 _totalL1MessagesPoppedInBatch, uint256 _totalL1MessagesPoppedOverall - ) internal pure returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { + ) internal view returns (bytes32 _dataHash, uint256 _totalNumL1MessagesInChunk) { uint256 chunkPtr; uint256 startDataPtr; uint256 dataPtr; @@ -568,7 +761,7 @@ contract MockBridge { uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr); if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs(); unchecked { - _totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages + _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock; _totalL1MessagesPoppedOverall += _numL1MessagesInBlock; @@ -578,6 +771,11 @@ contract MockBridge { } } + // check the actual number of transactions in the chunk + if (_totalTransactionsInChunk > maxNumTxInChunk) { + revert ErrorTooManyTxsInOneChunk(); + } + // compute data hash and store to memory assembly { _dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr)) diff --git a/rollup/tests/bridge_test.go b/rollup/tests/bridge_test.go index a2eb8005c5..991961af5d 100644 --- a/rollup/tests/bridge_test.go +++ b/rollup/tests/bridge_test.go @@ -206,10 +206,8 @@ func TestFunction(t *testing.T) { // l1 rollup and watch rollup events t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch) - t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch) - t.Run("TestCommitBatchAndFinalizeBatch4844", testCommitBatchAndFinalizeBatch4844) - t.Run("TestCommitBatchAndFinalizeBatchBeforeAndAfter4844", testCommitBatchAndFinalizeBatchBeforeAndAfter4844) - t.Run("TestCommitBatchAndFinalizeBatchBeforeAndAfterCompression", testCommitBatchAndFinalizeBatchBeforeAndAfterCompression) + t.Run("TestCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions", testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions) + t.Run("TestCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions", testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions) // l1/l2 gas oracle t.Run("TestImportL1GasPrice", testImportL1GasPrice) diff --git a/rollup/tests/rollup_test.go b/rollup/tests/rollup_test.go index 9f8868c230..5785642732 100644 --- a/rollup/tests/rollup_test.go +++ b/rollup/tests/rollup_test.go @@ -52,157 +52,29 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) { assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus)) } -func testCommitBatchAndFinalizeBatch(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - prepareContracts(t) - - // Create L2Relayer - l2Cfg := rollupApp.Config.L2Config - l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil) - assert.NoError(t, err) - defer l2Relayer.StopSenders() - - // Create L1Watcher - l1Cfg :=
rollupApp.Config.L1Config - l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil) - - // add some blocks to db - var blocks []*encoding.Block - for i := int64(0); i < 10; i++ { - header := gethTypes.Header{ - Number: big.NewInt(i + 1), - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), - Root: common.HexToHash("0x1"), - } - blocks = append(blocks, &encoding.Block{ - Header: &header, - Transactions: nil, - WithdrawRoot: common.HexToHash("0x2"), - RowConsumption: &gethTypes.RowConsumption{}, - }) - } - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks) - assert.NoError(t, err) - - cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 100, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1048319, - ChunkTimeoutSec: 300, - }, &params.ChainConfig{}, db, nil) - - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 300, - }, &params.ChainConfig{}, db, nil) - - cp.TryProposeChunk() - - batchOrm := orm.NewBatch(db) - unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background()) - assert.NoError(t, err) - - chunkOrm := orm.NewChunk(db) - chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0) - assert.NoError(t, err) - assert.Len(t, chunks, 1) - - bp.TryProposeBatch() - - l2Relayer.ProcessPendingBatches() - batch, err := batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) - assert.NoError(t, err) - err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) - assert.NoError(t, err) - - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() - - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) -
assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) - - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) -} - -func testCommitBatchAndFinalizeBatch4844(t *testing.T) { - compressionTests := []bool{false, true} // false for uncompressed, true for compressed - for _, compressed := range compressionTests { +func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} + for _, codecVersion := range codecVersions { db := setupDB(t) prepareContracts(t) - // Create L2Relayer - l2Cfg := rollupApp.Config.L2Config var chainConfig *params.ChainConfig - if compressed { + if codecVersion == encoding.CodecV0 { + chainConfig = &params.ChainConfig{} + } else if codecVersion == encoding.CodecV1 { + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + } else if codecVersion == encoding.CodecV2 { chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} } else { - chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)} + chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} } + + // Create L2Relayer + l2Cfg := rollupApp.Config.L2Config l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) - // Create L1Watcher - l1Cfg := rollupApp.Config.L1Config - l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil) - // add some blocks to db var blocks []*encoding.Block for i := int64(0); i < 10; i++ { @@ -221,244 +93,138 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) { }) } - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks) - assert.NoError(t, err) - cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ MaxBlockNumPerChunk: 100, MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 1, - MaxL1CommitCalldataSizePerChunk: 100000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, MaxRowConsumptionPerChunk: 1048319, ChunkTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 1, - MaxL1CommitCalldataSizePerBatch: 100000, + bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: 50000000000, + MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - cp.TryProposeChunk() + bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: 1000000, + BundleTimeoutSec: 300, + }, chainConfig, db, nil) - batchOrm := orm.NewBatch(db) - unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background()) + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5]) assert.NoError(t, err) - chunkOrm := orm.NewChunk(db) - chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0) + cp.TryProposeChunk() + bap.TryProposeBatch() + + err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[5:]) assert.NoError(t, err) - assert.Len(t, chunks, 1) - bp.TryProposeBatch() + cp.TryProposeChunk() + bap.TryProposeBatch() + + bup.TryProposeBundle() // The proposed bundle contains two batches when codec version is codecv3. l2Relayer.ProcessPendingBatches() - batch, err := batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) + batchOrm := orm.NewBatch(db) + bundleOrm := orm.NewBundle(db) assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful + batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, getErr) + assert.Len(t, batches, 3) + batches = batches[1:] + for _, batch := range batches { + if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) { + return false + } + } + return true }, 30*time.Second, time.Second) - // add dummy proof - proof := &message.BatchProof{ + batchProof := &message.BatchProof{ Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) - assert.NoError(t, err) - err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() - - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) - - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() + 
batches = batches[1:] + for _, batch := range batches { err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 100) assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetLatestBatch(context.Background()) + err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - - l2Relayer.StopSenders() - database.CloseDB(db) - } -} - -func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) { - compressionTests := []bool{false, true} // false for uncompressed, true for compressed - for _, compressed := range compressionTests { - db := setupDB(t) - - prepareContracts(t) - - // Create L2Relayer - l2Cfg := rollupApp.Config.L2Config - var chainConfig *params.ChainConfig - if compressed { - chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(5), CurieBlock: big.NewInt(5)} - } else { - chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(5)} } - l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil) - assert.NoError(t, err) - - // Create L1Watcher - l1Cfg := rollupApp.Config.L1Config - l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil) - // add some blocks to db - var blocks []*encoding.Block - for i := int64(0); i < 10; i++ { - header := gethTypes.Header{ - Number: big.NewInt(i + 1), - ParentHash: common.Hash{}, - Difficulty: big.NewInt(0), - BaseFee: big.NewInt(0), - Root: common.HexToHash("0x1"), - } - blocks = append(blocks, &encoding.Block{ - Header: &header, - Transactions: nil, - WithdrawRoot: common.HexToHash("0x2"), - RowConsumption: &gethTypes.RowConsumption{}, - }) + bundleProof := &message.BundleProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, } - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - - cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 100, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1048319, - ChunkTimeoutSec: 300, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) - - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch:
50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 300, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) - - cp.TryProposeChunk() - cp.TryProposeChunk() - bp.TryProposeBatch() - bp.TryProposeBatch() - - for i := uint64(0); i < 2; i++ { - l2Relayer.ProcessPendingBatches() - batchOrm := orm.NewBatch(db) - batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1) + for _, bundle := range bundles { + err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100) assert.NoError(t, err) - assert.NotNil(t, batch) + } - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) + assert.Eventually(t, func() bool { + l2Relayer.ProcessCommittedBatches() + l2Relayer.ProcessPendingBundles() - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) - assert.NoError(t, err) - err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) + assert.Len(t, batches, 3) + batches = batches[1:] + for _, batch := range batches { + if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized { + return false + } - // process committed batch and check status - l2Relayer.ProcessCommittedBatches() + assert.NotEmpty(t, batch.FinalizeTxHash) + receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) + assert.NoError(t, getErr) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + } - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) - - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() + if codecVersion == encoding.CodecV0 || codecVersion == encoding.CodecV1 || codecVersion == encoding.CodecV2 { + assert.Len(t, bundles, 0) + } else { + assert.Len(t, bundles, 1) + bundle := bundles[0] + if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized 
{ + return false + } + assert.NotEmpty(t, bundle.FinalizeTxHash) + receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash)) assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) - - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0) assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - } + assert.Len(t, batches, 2) + for _, batch := range batches { + assert.Equal(t, batch.RollupStatus, bundle.RollupStatus) + assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash) + } + } + return true + }, 30*time.Second, time.Second) l2Relayer.StopSenders() database.CloseDB(db) } } -func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { +func testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions(t *testing.T) { db := setupDB(t) defer database.CloseDB(db) @@ -466,15 +232,11 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { // Create L2Relayer l2Cfg := rollupApp.Config.L2Config - chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(5)} + chainConfig := &params.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2), DarwinTime: func() *uint64 { t := uint64(4); return &t }()} l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil) assert.NoError(t, err) defer l2Relayer.StopSenders() - // Create L1Watcher - l1Cfg := rollupApp.Config.L1Config - l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil) - // add some blocks to db var blocks []*encoding.Block for i := int64(0); i < 10; i++ { @@ -484,6 +246,7 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { Difficulty: big.NewInt(0), BaseFee: big.NewInt(0), Root: common.HexToHash("0x1"), + Time: uint64(i + 1), } blocks = append(blocks, &encoding.Block{ Header: &header, @@ -507,80 +270,130 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) { MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) - bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: 50000000000, MaxL1CommitCalldataSizePerBatch: 1000000, BatchTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, }, chainConfig, db, nil) + bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{ + MaxBatchNumPerBundle: 1000000, + BundleTimeoutSec: 300, + }, chainConfig, db, nil) + + cp.TryProposeChunk() + cp.TryProposeChunk() + cp.TryProposeChunk() cp.TryProposeChunk()
cp.TryProposeChunk() - bp.TryProposeBatch() - bp.TryProposeBatch() - for i := uint64(0); i < 2; i++ { - l2Relayer.ProcessPendingBatches() - batchOrm := orm.NewBatch(db) - batch, err := batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) + bap.TryProposeBatch() + bap.TryProposeBatch() + bap.TryProposeBatch() + bap.TryProposeBatch() - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0] - }, 30*time.Second, time.Second) + bup.TryProposeBundle() - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) - assert.NotEmpty(t, batch.CommitTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) + l2Relayer.ProcessPendingBatches() - // add dummy proof - proof := &message.BatchProof{ - Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, - Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + batchOrm := orm.NewBatch(db) + bundleOrm := orm.NewBundle(db) + + assert.Eventually(t, func() bool { + batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, getErr) + assert.Len(t, batches, 4) + batches = batches[1:] + for _, batch := range batches { + if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) { + return false + } } - err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, proof, 100) + return true + }, 30*time.Second, time.Second) + + batchProof := &message.BatchProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + batches = batches[1:] + for _, batch := range batches { + err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 600) assert.NoError(t, err) err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified) assert.NoError(t, err) + } - // process committed batch and check status + bundleProof := &message.BundleProof{ + Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + Vk: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, + } + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + for _, bundle := range bundles { + err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100) + assert.NoError(t, err) + } + + assert.Eventually(t, func() bool { l2Relayer.ProcessCommittedBatches() - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - assert.Equal(t, 1, len(statuses)) - assert.Equal(t, types.RollupFinalizing, statuses[0]) + assert.Len(t, batches, 4) + batches = batches[1:2] + for _, batch := range batches { + if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized { + return false + } + assert.NotEmpty(t, batch.FinalizeTxHash) + receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) + assert.NoError(t, getErr) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + } + return true + }, 30*time.Second, time.Second) - // fetch rollup events - assert.Eventually(t, func() bool { - err = l1Watcher.FetchContractEvent() - assert.NoError(t, err) - var statuses []types.RollupStatus - statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash}) - return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0] - }, 30*time.Second, time.Second) + assert.Eventually(t, func() bool { + l2Relayer.ProcessPendingBundles() - assert.Eventually(t, func() bool { - batch, err = batchOrm.GetBatchByIndex(context.Background(), i+1) - assert.NoError(t, err) - assert.NotNil(t, batch) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + assert.Len(t, batches, 4) + batches = batches[3:] + for _, batch := range batches { + if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized { + return false + } assert.NotEmpty(t, batch.FinalizeTxHash) - var receipt *gethTypes.Receipt - receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) - return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful - }, 30*time.Second, time.Second) - } + receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash)) + assert.NoError(t, getErr) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + } + + bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) + assert.NoError(t, err) + assert.Len(t, bundles, 1) + bundle := bundles[0] + if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized { + return false + } + assert.NotEmpty(t, bundle.FinalizeTxHash) + receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash)) + assert.NoError(t, err) + assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) + batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0) + assert.NoError(t, err) + assert.Len(t, batches, 1) + for _, batch := range batches { + assert.Equal(t, batch.RollupStatus, bundle.RollupStatus) + assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash) + } + return true + }, 30*time.Second, time.Second) } diff --git 
a/scroll-contracts b/scroll-contracts index ca7f0768b6..2ac4f3f7e0 160000 --- a/scroll-contracts +++ b/scroll-contracts @@ -1 +1 @@ -Subproject commit ca7f0768b6640dc10b19f3d4da3943a87bdf11b1 +Subproject commit 2ac4f3f7e090d7127db4b13b3627cb3ce2d762bc
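A closing note on the test refactor: the per-codec test above derives its chain config from the codec version under test. A hypothetical helper distilling that if/else chain (import paths are assumptions based on the repo's usage; the config values are taken verbatim from the test):

```go
package rolluptest

import (
	"math/big"

	// assumed import paths, matching the identifiers used in the test diff
	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/params"
)

// chainConfigFor maps each codec version to the hardfork flags that
// activate it: V1 needs Bernoulli, V2 adds Curie, V3 adds Darwin.
func chainConfigFor(v encoding.CodecVersion) *params.ChainConfig {
	switch v {
	case encoding.CodecV0:
		return &params.ChainConfig{}
	case encoding.CodecV1:
		return &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
	case encoding.CodecV2:
		return &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
	default: // encoding.CodecV3 and later
		return &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
	}
}
```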