Merge upstream v0.6.6 changes (#134)
* check that the GER and index of the synced L1InfoRoot match the SC values (0xPolygonHermez#3551)

* apply txIndex fix to StoreTransactions; add migration to fix wrong txIndexes (0xPolygonHermez#3556)

* Feature/0xPolygonHermez#3549 reorgs improvement (0xPolygonHermez#3553)

* New reorg function

* mocks

* linter

* Synchronizer tests

* new elderberry smc docker image

* new image

* logs

* fix json rpc

* fix

* Test sync from empty block

* Regular reorg case tested

* linter

* remove empty block + fix LatestSyncedBlockEmpty

* Improve reorg checks when no block is received during the call

* fix RPC error code for eth_estimateGas and eth_call for reverted txs with no return value; fix e2e test

* fix test

* Extra unit test

* fix reorg until genesis

* disable parallel synchronization

---------

Co-authored-by: tclemos <[email protected]>

* Fix adding a tx that matches a tx that is being processed (0xPolygonHermez#3559)

* fix adding a tx that matches (same addr and nonce) a tx that is being processed

* fix generate mocks

* fix updateCurrentNonceBalance

* synchronizer: check L1 blocks (0xPolygonHermez#3546)

* wip

* run the L1 block checker in the background

* fix lint and documentation

* fix conflict

* add unittest

* more unittest

* fix lint

* increase timeout for async unittest

* fix unittest

* rename GetResponse to GetResult and fix unit test

* add a second goroutine to check the newest blocks

* more unittest

* add unit test and also run preCheck on launch

* by default, run Precheck from FINALIZED and SAFE

* fix unittest, apply PR comments

* changes suggested by ARR552 in integration method

* fix documentation

* import new network-l1-mock from PR#3553

* import new network-l1-mock from PR#3553

* import new network-l1-mock from PR#3553

* import new network-l1-mock from PR#3553

* fix unittest

* fix PR comments

* fix error

* checkReorgAndExecuteReset can't be called with lastEthBlockSynced=nil

* add parentHash to error

* fix error

* merge 3553 fix unittest

* fix unittest

* fix wrong merge

* adapt parallel reorg detection to the flow

* fix unit tests

* fix log

* allow using parallel sync mode

---------

Co-authored-by: Alonso <[email protected]>

* Fix + remove empty blocks (0xPolygonHermez#3564)

* Fix + remove empty blocks

* unit test

* linter

* Fix/0xPolygonHermez#3565 reorg (0xPolygonHermez#3566)

* fix + logs

* fix loop

* Revert "fix + logs"

This reverts commit 39ced69.

* fix L1InfoRoot when an error happens during the processing of the L1 information (0xPolygonHermez#3576)

* fix

* Comments + mock

* avoid errors from some L1 providers when fromBlock is higher than toBlock

* Revert some changes

* comments

* add L2BlockModulus to L1check

* doc

* fix dbTx = nil

* fix unit tests

* added logs to analyze blocking issue when storing L2 block

* add debug logs for datastreamer

* fix 0xPolygonHermez#3581: synchronizer panic when synchronizing from trusted node (0xPolygonHermez#3582)

* synchronizer: 0xPolygonHermez#3583 stop sync from L2 after a non-closed batch (0xPolygonHermez#3584)

* stop processing the trusted node after the first open batch

* Update datastream lib to the latest version with additional debug info

* update dslib client interface

* Update the diff

* Fix non-e2e tests

* Update the docker image for the mock L1 network

* Update the diff

* Fix typo in the comment

* Use the Geth v1.13.11 Docker image and update the genesis spec

* Update the diff

---------

Co-authored-by: agnusmor <[email protected]>
Co-authored-by: Thiago Coimbra Lemos <[email protected]>
Co-authored-by: Alonso Rodriguez <[email protected]>
Co-authored-by: tclemos <[email protected]>
Co-authored-by: Joan Esteban <[email protected]>
Co-authored-by: Alonso <[email protected]>
Co-authored-by: agnusmor <[email protected]>
Co-authored-by: dPunisher <[email protected]>
9 people authored May 22, 2024
1 parent e74a227 commit 404088f
Showing 75 changed files with 262,898 additions and 63,613 deletions.
10 changes: 9 additions & 1 deletion config/default.go
@@ -105,7 +105,15 @@ TrustedSequencerURL = "" # If it is empty or not specified, then the value is re
 SyncBlockProtection = "safe" # latest, finalized, safe
 L1SynchronizationMode = "sequential"
 L1SyncCheckL2BlockHash = true
-L1SyncCheckL2BlockNumberhModulus = 30
+L1SyncCheckL2BlockNumberhModulus = 600
+[Synchronizer.L1BlockCheck]
+Enable = true
+L1SafeBlockPoint = "finalized"
+L1SafeBlockOffset = 0
+ForceCheckBeforeStart = true
+PreCheckEnable = true
+L1PreSafeBlockPoint = "safe"
+L1PreSafeBlockOffset = 0
 [Synchronizer.L1ParallelSynchronization]
 MaxClients = 10
 MaxPendingNoProcessedBlocks = 25
182 changes: 91 additions & 91 deletions config/environments/local/local.genesis.config.json

Large diffs are not rendered by default.

25 changes: 25 additions & 0 deletions db/migrations/state/0019.sql
@@ -0,0 +1,25 @@
-- +migrate Up

-- the update below fixes the wrong receipt TX indexes
WITH map_fix_tx_index AS (
    SELECT t.l2_block_num AS block_num
         , t.hash AS tx_hash
         , r.tx_index AS current_index
         , (ROW_NUMBER() OVER (PARTITION BY t.l2_block_num ORDER BY r.tx_index))-1 AS correct_index
      FROM state.receipt r
     INNER JOIN state."transaction" t
        ON t.hash = r.tx_hash
)
UPDATE state.receipt AS r
   SET tx_index = m.correct_index
  FROM map_fix_tx_index m
 WHERE m.block_num = r.block_num
   AND m.tx_hash = r.tx_hash
   AND m.current_index = r.tx_index
   AND m.current_index != m.correct_index;


-- +migrate Down

-- no action is needed, the data fixed by the
-- migrate up must remain fixed
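
For intuition, the ROW_NUMBER() window in the migration above renumbers each block's receipts consecutively from zero while preserving their relative order. A standalone, illustrative Postgres query (not part of the migration; the values mirror the "index gap" test case below) shows the mapping:

-- illustrative: how ROW_NUMBER() remaps a gapped sequence 0, 2, 4, 6
SELECT tx_index AS current_index,
       ROW_NUMBER() OVER (ORDER BY tx_index) - 1 AS correct_index
FROM (VALUES (0), (2), (4), (6)) AS demo(tx_index);
-- current_index 0, 2, 4, 6  ->  correct_index 0, 1, 2, 3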
145 changes: 145 additions & 0 deletions db/migrations/state/0019_test.go
@@ -0,0 +1,145 @@
package migrations_test

import (
	"database/sql"
	"testing"

	"github.com/0xPolygonHermez/zkevm-node/hex"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

type migrationTest0019TestCase struct {
	Name  string
	Block migrationTest0019TestCaseBlock
}

type migrationTest0019TestCaseBlock struct {
	Transactions []migrationTest0019TestCaseTransaction
}

type migrationTest0019TestCaseTransaction struct {
	CurrentIndex uint
}

type migrationTest0019 struct {
	TestCases []migrationTest0019TestCase
}

func (m migrationTest0019) InsertData(db *sql.DB) error {
	const addBlock0 = "INSERT INTO state.block (block_num, received_at, block_hash) VALUES (0, now(), '0x0')"
	if _, err := db.Exec(addBlock0); err != nil {
		return err
	}

	const addBatch0 = `
		INSERT INTO state.batch (batch_num, global_exit_root, local_exit_root, acc_input_hash, state_root, timestamp, coinbase, raw_txs_data, forced_batch_num, wip)
		VALUES (0,'0x0000', '0x0000', '0x0000', '0x0000', now(), '0x0000', null, null, true)`
	if _, err := db.Exec(addBatch0); err != nil {
		return err
	}

	const addL2Block = "INSERT INTO state.l2block (block_num, block_hash, header, uncles, parent_hash, state_root, received_at, batch_num, created_at) VALUES ($1, $2, '{}', '{}', '0x0', '0x0', now(), 0, now())"
	const addTransaction = "INSERT INTO state.transaction (hash, encoded, decoded, l2_block_num, effective_percentage, l2_hash) VALUES ($1, 'ABCDEF', '{}', $2, 255, $1)"
	const addReceipt = "INSERT INTO state.receipt (tx_hash, type, post_state, status, cumulative_gas_used, gas_used, effective_gas_price, block_num, tx_index, contract_address) VALUES ($1, 1, null, 1, 1234, 1234, 1, $2, $3, '')"

	txUnique := 0
	for tci, testCase := range m.TestCases {
		blockNum := uint64(tci + 1)
		blockHash := common.HexToHash(hex.EncodeUint64(blockNum)).String()
		if _, err := db.Exec(addL2Block, blockNum, blockHash); err != nil {
			return err
		}
		for _, tx := range testCase.Block.Transactions {
			txUnique++
			txHash := common.HexToHash(hex.EncodeUint64(uint64(txUnique))).String()
			if _, err := db.Exec(addTransaction, txHash, blockNum); err != nil {
				return err
			}
			if _, err := db.Exec(addReceipt, txHash, blockNum, tx.CurrentIndex); err != nil {
				return err
			}
		}
	}

	return nil
}

func (m migrationTest0019) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
	const getReceiptsByBlock = "SELECT r.tx_index FROM state.receipt r WHERE r.block_num = $1 ORDER BY r.tx_index"

	for tci := range m.TestCases {
		blockNum := uint64(tci + 1)

		rows, err := db.Query(getReceiptsByBlock, blockNum)
		require.NoError(t, err)

		var expectedIndex = uint(0)
		var txIndex uint
		for rows.Next() {
			err := rows.Scan(&txIndex)
			require.NoError(t, err)
			require.Equal(t, expectedIndex, txIndex)
			expectedIndex++
		}
	}
}

func (m migrationTest0019) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
	m.RunAssertsAfterMigrationUp(t, db)
}

func TestMigration0019(t *testing.T) {
	runMigrationTest(t, 19, migrationTest0019{
		TestCases: []migrationTest0019TestCase{
			{
				Name: "single tx with correct index",
				Block: migrationTest0019TestCaseBlock{
					Transactions: []migrationTest0019TestCaseTransaction{
						{CurrentIndex: 0},
					},
				},
			},
			{
				Name: "multiple txs indexes are correct",
				Block: migrationTest0019TestCaseBlock{
					Transactions: []migrationTest0019TestCaseTransaction{
						{CurrentIndex: 0},
						{CurrentIndex: 1},
						{CurrentIndex: 2},
					},
				},
			},
			{
				Name: "single tx with wrong tx index",
				Block: migrationTest0019TestCaseBlock{
					Transactions: []migrationTest0019TestCaseTransaction{
						{CurrentIndex: 3},
					},
				},
			},
			{
				Name: "multiple txs missing 0 index",
				Block: migrationTest0019TestCaseBlock{
					Transactions: []migrationTest0019TestCaseTransaction{
						{CurrentIndex: 1},
						{CurrentIndex: 2},
						{CurrentIndex: 3},
						{CurrentIndex: 4},
					},
				},
			},
			{
				Name: "multiple has index 0 but also txs index gap",
				Block: migrationTest0019TestCaseBlock{
					Transactions: []migrationTest0019TestCaseTransaction{
						{CurrentIndex: 0},
						{CurrentIndex: 2},
						{CurrentIndex: 4},
						{CurrentIndex: 6},
					},
				},
			},
		},
	})
}
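
The assertion loop in RunAssertsAfterMigrationUp effectively verifies that each block's receipt indexes run consecutively from zero. The same invariant can be spot-checked directly in SQL; this illustrative query (not part of the change) returns any block that still violates it:

-- illustrative: blocks with non-contiguous or duplicated receipt indexes
SELECT block_num
  FROM state.receipt
 GROUP BY block_num
HAVING MIN(tx_index) <> 0
    OR MAX(tx_index) <> COUNT(*) - 1
    OR COUNT(DISTINCT tx_index) <> COUNT(*);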
28 changes: 28 additions & 0 deletions db/migrations/state/0020.sql
@@ -0,0 +1,28 @@
-- +migrate Up

-- This migration will delete all empty blocks
DELETE FROM state.block
WHERE NOT EXISTS (SELECT *
FROM state.virtual_batch
WHERE state.virtual_batch.block_num = state.block.block_num)
AND NOT EXISTS (SELECT *
FROM state.verified_batch
WHERE state.verified_batch.block_num = state.block.block_num)
AND NOT EXISTS (SELECT *
FROM state.forced_batch
WHERE state.forced_batch.block_num = state.block.block_num)
AND NOT EXISTS (SELECT *
FROM state.exit_root
WHERE state.exit_root.block_num = state.block.block_num)
AND NOT EXISTS (SELECT *
FROM state.monitored_txs
WHERE state.monitored_txs.block_num = state.block.block_num)
AND NOT EXISTS (SELECT *
FROM state.fork_id
WHERE state.fork_id.block_num = state.block.block_num);



-- +migrate Down

-- no action is needed, the data must remain deleted as it is useless
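
Before applying this migration, the number of rows it would remove can be previewed with a read-only query of the same shape. This is illustrative only; it simply mirrors the NOT EXISTS conditions of the DELETE above:

-- illustrative preview: count of empty blocks the migration would delete
SELECT count(*)
  FROM state.block b
 WHERE NOT EXISTS (SELECT 1 FROM state.virtual_batch  vb WHERE vb.block_num = b.block_num)
   AND NOT EXISTS (SELECT 1 FROM state.verified_batch ve WHERE ve.block_num = b.block_num)
   AND NOT EXISTS (SELECT 1 FROM state.forced_batch   fb WHERE fb.block_num = b.block_num)
   AND NOT EXISTS (SELECT 1 FROM state.exit_root      er WHERE er.block_num = b.block_num)
   AND NOT EXISTS (SELECT 1 FROM state.monitored_txs  mt WHERE mt.block_num = b.block_num)
   AND NOT EXISTS (SELECT 1 FROM state.fork_id        fi WHERE fi.block_num = b.block_num);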
99 changes: 99 additions & 0 deletions db/migrations/state/0020_test.go
@@ -0,0 +1,99 @@
package migrations_test

import (
	"database/sql"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// this migration deletes all empty blocks, i.e. state.block rows that are not
// referenced by any virtual batch, verified batch, forced batch, exit root,
// monitored tx, or fork id
type migrationTest0020 struct{}

func (m migrationTest0020) InsertData(db *sql.DB) error {
	addBlocks := `
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(1, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b20', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50fe', '2024-03-11 02:52:23.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(2, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b21', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f1', '2024-03-11 02:52:24.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(3, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b22', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f2', '2024-03-11 02:52:25.000', false);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(4, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b23', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f3', '2024-03-11 02:52:26.000', false);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(5, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b24', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f4', '2024-03-11 02:52:27.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(6, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b25', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f5', '2024-03-11 02:52:28.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(7, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b26', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f6', '2024-03-11 02:52:29.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(8, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b27', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f7', '2024-03-11 02:52:30.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(9, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b28', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f8', '2024-03-11 02:52:31.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(10, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b29', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50f9', '2024-03-11 02:52:32.000', true);
INSERT INTO state.block
(block_num, block_hash, parent_hash, received_at, checked)
VALUES(11, '0x013be63487a53c874614dd1ae0434cf211e393b2e386c8fde74da203b5469b2a', '0x0328698ebeda498df8c63040e2a4771d24722ab2c1e8291226b9215c7eec50fa', '2024-03-11 02:52:33.000', true);
INSERT INTO state.batch
(batch_num, global_exit_root, local_exit_root, state_root, acc_input_hash, "timestamp", coinbase, raw_txs_data, forced_batch_num, batch_resources, closing_reason, wip, checked)
VALUES(1, '0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000', '0x3f86b09b43e3e49a41fc20a07579b79eba044253367817d5c241d23c0e2bc5c9', '0xa5bd7311fe00707809dd3aa718be2ea0cb363626b9db44172098515f07acf940', '2023-03-24 16:35:27.000', '0x148Ee7dAF16574cD020aFa34CC658f8F3fbd2800', decode('','hex'), NULL, '{"Bytes": 0, "ZKCounters": {"GasUsed": 0, "UsedSteps": 0, "UsedBinaries": 0, "UsedMemAligns": 0, "UsedArithmetics": 0, "UsedKeccakHashes": 0, "UsedPoseidonHashes": 0, "UsedSha256Hashes_V2": 0, "UsedPoseidonPaddings": 0}}'::jsonb, '', false, true);
INSERT INTO state.virtual_batch
(batch_num, tx_hash, coinbase, block_num, sequencer_addr, timestamp_batch_etrog, l1_info_root)
VALUES(1, '0x4314ed5d8ad4812e88895942b2b4642af176d80a97c5489a16a7a5aeb08b51a6', '0x148Ee7dAF16574cD020aFa34CC658f8F3fbd2800', 2, '0x148Ee7dAF16574cD020aFa34CC658f8F3fbd2800', '2024-04-09 16:26:45.000', '0xcdb4258d7ccd8fd41c4a26fd8d9d1fadbc9c506e64d489170525a65e2ad3580b');
INSERT INTO state.verified_batch
(batch_num, tx_hash, aggregator, state_root, block_num, is_trusted)
VALUES(1, '0x28e82f15ab7bac043598623c65a838c315d00ecb5d6e013c406d6bb889680592', '0x6329Fe417621925C81c16F9F9a18c203C21Af7ab', '0x80bd488b1e150b9b42611d038c7fdfa43a3e95b3a02e5c2d57074e73b583f8fd', 3, true);
INSERT INTO state.fork_id
(fork_id, from_batch_num, to_batch_num, "version", block_num)
VALUES(5, 813267, 1228916, 'v2.0.0-RC1-fork.5', 5);
INSERT INTO state.monitored_txs
("owner", id, from_addr, to_addr, nonce, value, "data", gas, gas_price, status, history, block_num, created_at, updated_at, gas_offset)
VALUES('sequencer', 'sequence-from-2006249-to-2006252', '0x148Ee7dAF16574cD020aFa34CC658f8F3fbd2800', '0x519E42c24163192Dca44CD3fBDCEBF6be9130987', 58056, NULL, 'def57e540000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000006614ec3100000000000000000000000000000000000000000000000000000000001e9ce8000000000000000000000000148ee7da0000000300000000ee8306089a84ae0baa0082520894417a7ba2d8d0060ae6c54fd098590db854b9c1d58609184e72a0008082044d80802787e068e6fe23cda64eb868cefb7231a17449d508a77919f6c5408814aaab5f259d43a62eb50df0b2d5740552d3f95176a1f0e31cade590facf70b01c1129151bab0b00000003000000000b00000003000000000b00000003000000000b00000003000000000b0000000300000000000000000000000000000000000000', 1474265, 25212431373, 'done', '{0x44423d538d6fc2f2e882fcd0d1952a735d81c824827b83936e6a5e52268a7d8e}', 7, '2024-04-09 09:26:36.235', '2024-04-09 09:38:24.377', 150000);
INSERT INTO state.exit_root
(id, block_num, "timestamp", mainnet_exit_root, rollup_exit_root, global_exit_root, prev_block_hash, l1_info_root, l1_info_tree_index)
VALUES(379599, 8, '2024-04-09 09:43:59.000', decode('C90DCBC69719971625800AD619E5EEEFD0378317E26F0DDE9B30B3C7C84DBD78','hex'), decode('514D72BBF7C2AD8E4D15EC1186EBF077E98208479651B1C30C5AC7DA11BAB209','hex'), decode('B20FACBED4A2774CE33A0F68D9B6F9B4D9AD553DACD73705503910B141D2102E','hex'), decode('845E01F723E5C77DBE5A4889F299860FBECD8353BFD423D366851F3A90496334','hex'), decode('EDB0EF9C80E947C411FD9B8B23318708132F8A3BD15CD366499866EF91748FC8','hex'), 8032);
INSERT INTO state.forced_batch
(block_num, forced_batch_num, global_exit_root, timestamp, raw_txs_data, coinbase)
VALUES(10, 1, '0x3f86b09b43e3e49a41fc20a07579b79eba044253367817d5c241d23c0e2bc5ca', '2024-04-09 09:26:36.235', '0x3f86b09b', '0x3f86b09b43e3e49a41fc20a07579b79eba044253367817d5c241d23c0e2bc5c9');
`
	if _, err := db.Exec(addBlocks); err != nil {
		return err
	}
	blockCount := `SELECT count(*) FROM state.block`
	var count int
	err := db.QueryRow(blockCount).Scan(&count)
	if err != nil {
		return err
	}
	if count != 11 {
		return fmt.Errorf("wrong initial number of blocks: expected 11, got %d", count)
	}
	return nil
}

func (m migrationTest0020) RunAssertsAfterMigrationUp(t *testing.T, db *sql.DB) {
	blockCount := `SELECT count(*) FROM state.block`
	var count int
	err := db.QueryRow(blockCount).Scan(&count)
	assert.NoError(t, err)
	assert.Equal(t, 6, count)
}

func (m migrationTest0020) RunAssertsAfterMigrationDown(t *testing.T, db *sql.DB) {
}

func TestMigration0020(t *testing.T) {
	runMigrationTest(t, 20, migrationTest0020{})
}