Skip to content

Commit

Permalink
pass E2E test
Browse files Browse the repository at this point in the history
  • Loading branch information
arnaubennassar committed Aug 2, 2024
1 parent 5f0d15a commit acd372e
Show file tree
Hide file tree
Showing 8 changed files with 217 additions and 83 deletions.
8 changes: 4 additions & 4 deletions common/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,26 +12,26 @@ import (
// Uint64ToBytes converts a uint64 to a byte slice
func Uint64ToBytes(num uint64) []byte {
bytes := make([]byte, 8)
binary.LittleEndian.PutUint64(bytes, num)
binary.BigEndian.PutUint64(bytes, num)

return bytes
}

// BytesToUint64 converts a byte slice to a uint64
func BytesToUint64(bytes []byte) uint64 {
return binary.LittleEndian.Uint64(bytes)
return binary.BigEndian.Uint64(bytes)
}

// Uint32ToBytes converts a uint32 to a 4-byte slice
func Uint32ToBytes(num uint32) []byte {
key := make([]byte, 4)
binary.LittleEndian.PutUint32(key, num)
binary.BigEndian.PutUint32(key, num)
return key
}

// BytesToUint32 converts a byte slice to a uint32
func BytesToUint32(bytes []byte) uint32 {
return binary.LittleEndian.Uint32(bytes)
return binary.BigEndian.Uint32(bytes)
}

func CalculateAccInputHash(
Expand Down
140 changes: 132 additions & 8 deletions l1infotreesync/e2e_test.go
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
package l1infotreesync
package l1infotreesync_test

import (
"context"
Expand All @@ -11,6 +11,7 @@ import (

"github.com/0xPolygon/cdk-contracts-tooling/contracts/manual/globalexitrootnopush0"
"github.com/0xPolygon/cdk/etherman"
"github.com/0xPolygon/cdk/l1infotreesync"
"github.com/0xPolygon/cdk/reorgdetector"
"github.com/0xPolygon/cdk/test/contracts/verifybatchesmock"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
Expand Down Expand Up @@ -71,12 +72,12 @@ func TestE2E(t *testing.T) {
require.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
require.NoError(t, err)
rdm := NewReorgDetectorMock(t)
rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{}, nil)
rdm := l1infotreesync.NewReorgDetectorMock(t)
rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil)
rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth)
require.NoError(t, err)
syncer, err := New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3)
syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3)
require.NoError(t, err)
go syncer.Start(ctx)

Expand Down Expand Up @@ -105,19 +106,19 @@ func TestE2E(t *testing.T) {
require.Equal(t, common.Hash(expectedRoot), actualRoot)
}

// Update 10 rollups 10 times
// Update rollups 1-9 (verify batches event) 10 times each
for rollupID := uint32(1); rollupID < 10; rollupID++ {
for i := 0; i < 10; i++ {
newLocalExitRoot := common.HexToHash(strconv.Itoa(int(rollupID)) + "ffff" + strconv.Itoa(i))
tx, err := verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true)
tx, err := verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, i%2 != 0)
require.NoError(t, err)
client.Commit()
// Let the processor catch up
time.Sleep(time.Millisecond * 100)
time.Sleep(time.Millisecond * 10)
receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash())
require.NoError(t, err)
require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful)
require.True(t, len(receipt.Logs) == 2)
require.True(t, len(receipt.Logs) == 1+i%2)

expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false})
require.NoError(t, err)
Expand All @@ -127,3 +128,126 @@ func TestE2E(t *testing.T) {
}
}
}

// TestFinalised explores how the simulated backend resolves the special
// negative block numbers (safe/finalized/latest/pending) after mining a
// batch of empty blocks, printing the height each one maps to.
// NOTE(review): this test only prints observations and asserts nothing
// beyond "no error" — consider pinning the expected latest-finalised gap
// with require.Equal once the backend's behavior is confirmed.
func TestFinalised(t *testing.T) {
	ctx := context.Background()
	privateKey, err := crypto.GenerateKey()
	require.NoError(t, err)
	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
	require.NoError(t, err)
	client, _, _, _, _, err := newSimulatedClient(auth)
	require.NoError(t, err)
	// Mine 100 empty blocks so the finalized/safe markers are populated.
	for i := 0; i < 100; i++ {
		client.Commit()
	}

	// Negative block numbers select special blocks — presumably the geth rpc
	// constants (-1 pending, -2 latest, -3 finalized, -4 safe); TODO confirm
	// against the go-ethereum rpc package.
	n4, err := client.Client().HeaderByNumber(ctx, big.NewInt(-4))
	require.NoError(t, err)
	fmt.Println("-4", n4.Number)
	n3, err := client.Client().HeaderByNumber(ctx, big.NewInt(-3))
	require.NoError(t, err)
	fmt.Println("-3", n3.Number)
	n2, err := client.Client().HeaderByNumber(ctx, big.NewInt(-2))
	require.NoError(t, err)
	fmt.Println("-2", n2.Number)
	n1, err := client.Client().HeaderByNumber(ctx, big.NewInt(-1))
	require.NoError(t, err)
	fmt.Println("-1", n1.Number)
	// nil asks for the default (latest) block.
	n0, err := client.Client().HeaderByNumber(ctx, nil)
	require.NoError(t, err)
	fmt.Println("0", n0.Number)
	// Was missing the trailing newline, garbling subsequent test output.
	fmt.Printf("amount of blocks latest - finalised: %d\n", n0.Number.Uint64()-n3.Number.Uint64())
}

// TestStressAndReorgs hammers the l1infotreesync syncer with many blocks,
// each carrying a varying mix of L1 info tree updates and rollup exit tree
// updates (VerifyBatches events), optionally forking the chain to simulate
// reorgs, and finally asserts that the syncer's view of both trees matches
// the contracts' on-chain roots.
func TestStressAndReorgs(t *testing.T) {
	const (
		totalIterations       = 10_124
		enableReorgs          = false // test fails when set to true
		reorgEveryXIterations = 53
		maxReorgDepth         = 5
		maxEventsPerBlock     = 7
		maxRollups            = 31
	)

	ctx := context.Background()
	dbPathSyncer := t.TempDir()
	dbPathReorg := t.TempDir()
	privateKey, err := crypto.GenerateKey()
	require.NoError(t, err)
	auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
	require.NoError(t, err)
	client, gerAddr, verifyAddr, gerSc, verifySC, err := newSimulatedClient(auth)
	require.NoError(t, err)
	rd, err := reorgdetector.New(ctx, client.Client(), dbPathReorg)
	// BUG FIX: this error was previously unchecked (and then overwritten by
	// the next assignment), so a failed reorg detector init would go unnoticed.
	require.NoError(t, err)
	go rd.Start(ctx)
	syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 3)
	require.NoError(t, err)
	go syncer.Start(ctx)

	for i := 0; i < totalIterations; i++ {
		// Emit between 0 and maxEventsPerBlock-1 events in this block.
		for j := 0; j < i%maxEventsPerBlock; j++ {
			switch j % 3 {
			case 0: // Update L1 Info Tree
				_, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i)))
				require.NoError(t, err)
			case 1: // Update L1 Info Tree + Rollup Exit Tree
				newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j))
				_, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, true)
				require.NoError(t, err)
			case 2: // Update Rollup Exit Tree
				newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j))
				_, err := verifySC.VerifyBatches(auth, 1+uint32(i%maxRollups), 0, newLocalExitRoot, common.Hash{}, false)
				require.NoError(t, err)
			}
		}
		client.Commit()
		time.Sleep(time.Microsecond * 10) // Sleep just enough for goroutine to switch
		if enableReorgs && i%reorgEveryXIterations == 0 {
			reorgDepth := i%maxReorgDepth + 1
			currentBlockNum, err := client.Client().BlockNumber(ctx)
			require.NoError(t, err)
			targetReorgBlockNum := currentBlockNum - uint64(reorgDepth)
			// The comparison guards against uint64 underflow: if the chain is
			// shorter than reorgDepth, the subtraction wraps around and the
			// condition is false.
			if targetReorgBlockNum < currentBlockNum { // we are dealing with uints...
				reorgBlock, err := client.Client().BlockByNumber(ctx, big.NewInt(int64(targetReorgBlockNum)))
				require.NoError(t, err)
				// NOTE(review): Fork's return value is discarded — check it if
				// the simulated backend reports fork failures via error.
				client.Fork(reorgBlock.Hash())
			}
		}
	}

	// Poll until the syncer has caught up with the chain head (max ~5s).
	syncerUpToDate := false
	var errMsg string
	for i := 0; i < 50; i++ {
		lpb, err := syncer.GetLastProcessedBlock(ctx)
		require.NoError(t, err)
		lb, err := client.Client().BlockNumber(ctx)
		require.NoError(t, err)
		if lpb == lb {
			syncerUpToDate = true
			break
		}
		time.Sleep(time.Millisecond * 100)
		errMsg = fmt.Sprintf("last block from client: %d, last block from syncer: %d", lb, lpb)
	}
	require.True(t, syncerUpToDate, errMsg)

	// Assert rollup exit root
	expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false})
	require.NoError(t, err)
	actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot)

	// Assert L1 Info tree root
	expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false})
	require.NoError(t, err)
	expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false})
	require.NoError(t, err)
	index, actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRootAndIndex(ctx)
	require.NoError(t, err)
	info, err := syncer.GetInfoByIndex(ctx, index)
	require.NoError(t, err, fmt.Sprintf("index: %d", index))

	require.Equal(t, common.Hash(expectedL1InfoRoot), actualL1InfoRoot)
	require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info))
}
8 changes: 8 additions & 0 deletions l1infotreesync/l1infotreesync.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,3 +125,11 @@ func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uin
// GetLastRollupExitRoot returns the most recent root of the rollup exit tree
// tracked by the underlying processor.
func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (common.Hash, error) {
	return s.processor.rollupExitTree.GetLastRoot(ctx)
}

// GetLastL1InfoTreeRootAndIndex returns the latest index of the L1 info tree
// together with the root at that index, as stored by the processor.
func (s *L1InfoTreeSync) GetLastL1InfoTreeRootAndIndex(ctx context.Context) (uint32, common.Hash, error) {
	return s.processor.l1InfoTree.GetLastIndexAndRoot(ctx)
}

// GetLastProcessedBlock returns the number of the last L1 block whose events
// have been fully processed by the syncer.
func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
	return s.processor.GetLastProcessedBlock(ctx)
}
69 changes: 36 additions & 33 deletions l1infotreesync/processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -339,24 +339,25 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
l1InfoTreeLeavesToAdd := []tree.Leaf{}
rollupExitTreeLeavesToAdd := []tree.Leaf{}
if len(b.Events) > 0 {
var initialIndex uint32
var initialL1InfoIndex uint32
var l1InfoLeavesAdded uint32
lastIndex, err := p.getLastIndex(tx)
if err == ErrNotFound {
initialIndex = 0
initialL1InfoIndex = 0
} else if err != nil {
rollback()
return err
} else {
initialIndex = lastIndex + 1
initialL1InfoIndex = lastIndex + 1
}
var nextExpectedRollupExitTreeRoot *ethCommon.Hash
for i, e := range b.Events {
for _, e := range b.Events {
event := e.(Event)
events = append(events, event)
if event.UpdateL1InfoTree != nil {
index := initialL1InfoIndex + l1InfoLeavesAdded
leafToStore := storeLeaf{
BlockNumber: b.Num,
Index: initialIndex + uint32(i),
Index: index,
MainnetExitRoot: event.UpdateL1InfoTree.MainnetExitRoot,
RollupExitRoot: event.UpdateL1InfoTree.RollupExitRoot,
ParentHash: event.UpdateL1InfoTree.ParentHash,
Expand All @@ -367,10 +368,10 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
return err
}
l1InfoTreeLeavesToAdd = append(l1InfoTreeLeavesToAdd, tree.Leaf{
Index: initialIndex + uint32(i),
Index: leafToStore.Index,
Hash: leafToStore.Hash(),
})
nextExpectedRollupExitTreeRoot = &leafToStore.RollupExitRoot
l1InfoLeavesAdded++
}

if event.VerifyBatches != nil {
Expand All @@ -379,36 +380,38 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
// that the computation of the tree is correct. However, there are some execution paths
// on the contract that don't follow this (verifyBatches + pendingStateTimeout != 0)
rollupExitTreeLeavesToAdd = append(rollupExitTreeLeavesToAdd, tree.Leaf{
Index: event.VerifyBatches.RollupID,
Hash: event.VerifyBatches.ExitRoot,
ExpectedRoot: nextExpectedRollupExitTreeRoot,
Index: event.VerifyBatches.RollupID - 1,
Hash: event.VerifyBatches.ExitRoot,
})
nextExpectedRollupExitTreeRoot = nil
}
}
bwl := blockWithLeafs{
FirstIndex: initialIndex,
LastIndex: initialIndex + uint32(len(b.Events)),
}
blockValue, err := json.Marshal(bwl)
if err != nil {
rollback()
return err
}
if err := tx.Put(blockTable, common.Uint64ToBytes(b.Num), blockValue); err != nil {
rollback()
return err
}
l1InfoTreeRollback, err = p.l1InfoTree.AddLeaves(tx, l1InfoTreeLeavesToAdd)
if err != nil {
rollback()
return err
if l1InfoLeavesAdded > 0 {
bwl := blockWithLeafs{
FirstIndex: initialL1InfoIndex,
LastIndex: initialL1InfoIndex + l1InfoLeavesAdded,
}
blockValue, err := json.Marshal(bwl)
if err != nil {
rollback()
return err
}
if err := tx.Put(blockTable, common.Uint64ToBytes(b.Num), blockValue); err != nil {
rollback()
return err
}
l1InfoTreeRollback, err = p.l1InfoTree.AddLeaves(tx, l1InfoTreeLeavesToAdd)
if err != nil {
rollback()
return err
}
}

rollupExitTreeRollback, err = p.rollupExitTree.UpseartLeaves(tx, rollupExitTreeLeavesToAdd, b.Num)
if err != nil {
rollback()
return err
if len(rollupExitTreeLeavesToAdd) > 0 {
rollupExitTreeRollback, err = p.rollupExitTree.UpseartLeaves(tx, rollupExitTreeLeavesToAdd, b.Num)
if err != nil {
rollback()
return err
}
}
}
if err := p.updateLastProcessedBlock(tx, b.Num); err != nil {
Expand Down
Loading

0 comments on commit acd372e

Please sign in to comment.