diff --git a/Dockerfile b/Dockerfile index 18224428..719868f3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM golang:1.22.5-alpine3.20 AS build WORKDIR $GOPATH/src/github.com/0xPolygon/cdk -RUN apk update && apk add --no-cache make build-base git +RUN apk update && apk add --no-cache make build-base git # INSTALL DEPENDENCIES COPY go.mod go.sum /src/ RUN cd /src && go mod download diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go index 42cd49a4..93ce347c 100644 --- a/aggoracle/chaingersender/evm.go +++ b/aggoracle/chaingersender/evm.go @@ -75,7 +75,7 @@ func NewEVMChainGERSender( func (c *EVMChainGERSender) IsGERAlreadyInjected(ger common.Hash) (bool, error) { timestamp, err := c.gerContract.GlobalExitRootMap(&bind.CallOpts{Pending: false}, ger) if err != nil { - return false, err + return false, fmt.Errorf("error calling gerContract.GlobalExitRootMap: %w", err) } return timestamp.Cmp(big.NewInt(0)) != 0, nil } @@ -86,28 +86,29 @@ func (c *EVMChainGERSender) UpdateGERWaitUntilMined(ctx context.Context, ger com return err } data, err := abi.Pack("updateGlobalExitRoot", ger) + if err != nil { + return err + } id, err := c.ethTxMan.Add(ctx, &c.gerAddr, nil, big.NewInt(0), data, c.gasOffset, nil) if err != nil { return err } for { time.Sleep(c.waitPeriodMonitorTx) + log.Debugf("waiting for tx %s to be mined", id.Hex()) res, err := c.ethTxMan.Result(ctx, id) if err != nil { log.Error("error calling ethTxMan.Result: ", err) } switch res.Status { - case ethtxmanager.MonitoredTxStatusCreated: - continue - case ethtxmanager.MonitoredTxStatusSent: + case ethtxmanager.MonitoredTxStatusCreated, + ethtxmanager.MonitoredTxStatusSent: continue case ethtxmanager.MonitoredTxStatusFailed: return fmt.Errorf("tx %s failed", res.ID) - case ethtxmanager.MonitoredTxStatusMined: - return nil - case ethtxmanager.MonitoredTxStatusSafe: - return nil - case ethtxmanager.MonitoredTxStatusFinalized: + case ethtxmanager.MonitoredTxStatusMined, + ethtxmanager.MonitoredTxStatusSafe, + ethtxmanager.MonitoredTxStatusFinalized: return nil default: log.Error("unexpected tx status: ", res.Status) diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go index ad1ff9c3..21ad642e 100644 --- a/aggoracle/e2e_test.go +++ b/aggoracle/e2e_test.go @@ -3,6 +3,7 @@ package aggoracle_test import ( "context" "errors" + "fmt" "math/big" "strconv" "testing" @@ -59,7 +60,7 @@ func commonSetup(t *testing.T) ( require.NoError(t, err) // Syncer dbPathSyncer := t.TempDir() - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0) + syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerL1Addr, common.Address{}, 10, etherman.LatestBlock, reorg, l1Client.Client(), time.Millisecond, 0) require.NoError(t, err) go syncer.Start(ctx) @@ -208,6 +209,6 @@ func runTest( require.NoError(t, err) isInjected, err := sender.IsGERAlreadyInjected(expectedGER) require.NoError(t, err) - require.True(t, isInjected) + require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) } } diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go index 291950c5..49d14b7e 100644 --- a/aggoracle/oracle.go +++ b/aggoracle/oracle.go @@ -9,17 +9,9 @@ import ( "github.com/0xPolygon/cdk/l1infotreesync" "github.com/0xPolygon/cdk/log" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ) -type EthClienter interface { - ethereum.LogFilterer - 
ethereum.BlockNumberReader - ethereum.ChainReader - bind.ContractBackend -} - type L1InfoTreer interface { GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) } @@ -31,7 +23,7 @@ type ChainSender interface { type AggOracle struct { ticker *time.Ticker - l1Client EthClienter + l1Client ethereum.ChainReader l1Info L1InfoTreer chainSender ChainSender blockFinality *big.Int @@ -39,7 +31,7 @@ type AggOracle struct { func New( chainSender ChainSender, - l1Client EthClienter, + l1Client ethereum.ChainReader, l1InfoTreeSyncer L1InfoTreer, blockFinalityType etherman.BlockNumberFinality, waitPeriodNextGER time.Duration, @@ -59,22 +51,30 @@ func New( } func (a *AggOracle) Start(ctx context.Context) { + var ( + blockNumToFetch uint64 + gerToInject common.Hash + err error + ) for { select { case <-a.ticker.C: - gerToInject, err := a.getLastFinalisedGER(ctx) + blockNumToFetch, gerToInject, err = a.getLastFinalisedGER(ctx, blockNumToFetch) if err != nil { - if err == l1infotreesync.ErrBlockNotProcessed || err == l1infotreesync.ErrNotFound { - log.Debugf("syncer is not ready: %v", err) + if err == l1infotreesync.ErrBlockNotProcessed { + log.Debugf("syncer is not ready for the block %d", blockNumToFetch) + } else if err == l1infotreesync.ErrNotFound { + log.Debugf("syncer has not found any GER until block %d", blockNumToFetch) + blockNumToFetch = 0 } else { - log.Error("error calling isGERAlreadyInjected: ", err) + log.Error("error calling getLastFinalisedGER: ", err) } continue } - if alreadyInjectd, err := a.chainSender.IsGERAlreadyInjected(gerToInject); err != nil { + if alreadyInjected, err := a.chainSender.IsGERAlreadyInjected(gerToInject); err != nil { log.Error("error calling isGERAlreadyInjected: ", err) continue - } else if alreadyInjectd { + } else if alreadyInjected { log.Debugf("GER %s already injected", gerToInject.Hex()) continue } @@ -90,14 +90,21 @@ func (a *AggOracle) Start(ctx context.Context) { } } -func (a *AggOracle) getLastFinalisedGER(ctx context.Context) (common.Hash, error) { - header, err := a.l1Client.HeaderByNumber(ctx, a.blockFinality) - if err != nil { - return common.Hash{}, err +// getLastFinalisedGER tries to return a finalised GER: +// If blockNumToFetch != 0: it will try to fetch it until the given block +// Else it will ask the L1 client for the latest finalised block and use that +// If it fails to get the GER from the syncer, it will return the block number that was used to query +func (a *AggOracle) getLastFinalisedGER(ctx context.Context, blockNumToFetch uint64) (uint64, common.Hash, error) { + if blockNumToFetch == 0 { + header, err := a.l1Client.HeaderByNumber(ctx, a.blockFinality) + if err != nil { + return 0, common.Hash{}, err + } + blockNumToFetch = header.Number.Uint64() } - info, err := a.l1Info.GetLatestInfoUntilBlock(ctx, header.Number.Uint64()) + info, err := a.l1Info.GetLatestInfoUntilBlock(ctx, blockNumToFetch) if err != nil { - return common.Hash{}, err + return blockNumToFetch, common.Hash{}, err } - return info.GlobalExitRoot, nil + return 0, info.GlobalExitRoot, nil } diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go index c9dceb77..10d79dbc 100644 --- a/bridgesync/bridgesync.go +++ b/bridgesync/bridgesync.go @@ -95,7 +95,7 @@ func new( return nil, err } if lastProcessedBlock < initialBlock { - err = processor.ProcessBlock(sync.Block{ + err = processor.ProcessBlock(ctx, sync.Block{ Num: initialBlock, }) if err != nil {
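A note for reviewers on the sentinel comparisons in the AggOracle loop above: err == l1infotreesync.ErrBlockNotProcessed only matches when the syncer returns the sentinel unwrapped; if those errors are ever wrapped with %w, errors.Is keeps matching. A minimal, self-contained sketch of the difference (the sentinel below is a stand-in, not the real one):

package main

import (
	"errors"
	"fmt"
)

// errBlockNotProcessed stands in for l1infotreesync.ErrBlockNotProcessed.
var errBlockNotProcessed = errors.New("given block(s) have not been processed yet")

func main() {
	// Wrapping breaks ==, but errors.Is unwraps and still matches.
	wrapped := fmt.Errorf("getLastFinalisedGER: %w", errBlockNotProcessed)
	fmt.Println(wrapped == errBlockNotProcessed)           // false
	fmt.Println(errors.Is(wrapped, errBlockNotProcessed)) // true
}

diff --git a/bridgesync/processor.go b/bridgesync/processor.go index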
36d34213..72fc5a01 100644 --- a/bridgesync/processor.go +++ b/bridgesync/processor.go @@ -5,11 +5,12 @@ import ( "encoding/binary" "encoding/json" "errors" - "log" "math/big" + "path" dbCommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree" "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" "github.com/ledgerwatch/erigon-lib/kv" @@ -19,8 +20,6 @@ import ( const ( eventsTableSufix = "-events" lastBlockTableSufix = "-lastBlock" - rootTableSufix = "-root" - rhtTableSufix = "-rht" ) var ( @@ -84,30 +83,26 @@ type processor struct { db kv.RwDB eventsTable string lastBlockTable string - tree *tree + exitTree *tree.AppendOnlyTree } func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, error) { eventsTable := dbPrefix + eventsTableSufix lastBlockTable := dbPrefix + lastBlockTableSufix - rootTable := dbPrefix + rootTableSufix - rhtTable := dbPrefix + rhtTableSufix db, err := mdbx.NewMDBX(nil). Path(dbPath). WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { return kv.TableCfg{ eventsTable: {}, lastBlockTable: {}, - rootTable: {}, - rhtTable: {}, } }). Open() if err != nil { return nil, err } - - tree, err := newTree(ctx, rhtTable, rootTable, db) + exitTreeDBPath := path.Join(dbPath, "exittree") + exitTree, err := tree.NewAppendOnly(ctx, exitTreeDBPath, dbPrefix) if err != nil { return nil, err } @@ -115,7 +110,7 @@ func newProcessor(ctx context.Context, dbPath, dbPrefix string) (*processor, err db: db, eventsTable: eventsTable, lastBlockTable: lastBlockTable, - tree: tree, + exitTree: exitTree, }, nil } @@ -144,7 +139,7 @@ func (p *processor) GetClaimsAndBridges( } defer c.Close() - for k, v, err := c.Seek(dbCommon.Uint64To2Bytes(fromBlock)); k != nil; k, v, err = c.Next() { + for k, v, err := c.Seek(dbCommon.Uint64ToBytes(fromBlock)); k != nil; k, v, err = c.Next() { if err != nil { return nil, err } @@ -181,8 +176,8 @@ func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { } } -func (p *processor) Reorg(firstReorgedBlock uint64) error { - tx, err := p.db.BeginRw(context.Background()) +func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + tx, err := p.db.BeginRw(ctx) if err != nil { return err } @@ -191,7 +186,7 @@ func (p *processor) Reorg(firstReorgedBlock uint64) error { return err } defer c.Close() - firstKey := dbCommon.Uint64To2Bytes(firstReorgedBlock) + firstKey := dbCommon.Uint64ToBytes(firstReorgedBlock) firstDepositCountReorged := int64(-1) for k, v, err := c.Seek(firstKey); k != nil; k, _, err = c.Next() { if err != nil { @@ -221,13 +216,7 @@ func (p *processor) Reorg(firstReorgedBlock uint64) error { return err } if firstDepositCountReorged != -1 { - var lastValidDepositCount uint32 - if firstDepositCountReorged == 0 { - lastValidDepositCount = 0 - } else { - lastValidDepositCount = uint32(firstDepositCountReorged) - 1 - } - if err := p.tree.reorg(tx, lastValidDepositCount); err != nil { + if err := p.exitTree.Reorg(ctx, uint32(firstDepositCountReorged)); err != nil { tx.Rollback() return err } @@ -235,8 +224,7 @@ func (p *processor) Reorg(firstReorgedBlock uint64) error { return tx.Commit() } -func (p *processor) ProcessBlock(block sync.Block) error { - ctx := context.Background() +func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { tx, err := p.db.BeginRw(ctx) if err != nil { return err @@ -256,7 +244,7 @@ func (p *processor) ProcessBlock(block sync.Block) error { tx.Rollback() return err } 
- if err := tx.Put(p.eventsTable, dbCommon.Uint64To2Bytes(block.Num), value); err != nil { + if err := tx.Put(p.eventsTable, dbCommon.Uint64ToBytes(block.Num), value); err != nil { tx.Rollback() return err } @@ -267,24 +255,21 @@ func (p *processor) ProcessBlock(block sync.Block) error { return err } - for i, bridge := range bridges { - if err := p.tree.addLeaf(tx, bridge.DepositCount, bridge.Hash()); err != nil { - if i != 0 { - tx.Rollback() - if err2 := p.tree.initLastLeftCacheAndLastDepositCount(ctx); err2 != nil { - log.Fatalf( - "after failing to add a leaf to the tree with error: %v, error initializing the cache with error: %v", - err, err2, - ) - } - return err - } - } + leaves := []tree.Leaf{} + for _, bridge := range bridges { + leaves = append(leaves, tree.Leaf{ + Index: bridge.DepositCount, + Hash: bridge.Hash(), + }) + } + if err := p.exitTree.AddLeaves(ctx, leaves); err != nil { + tx.Rollback() + return err } return tx.Commit() } func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error { - blockNumBytes := dbCommon.Uint64To2Bytes(blockNum) + blockNumBytes := dbCommon.Uint64ToBytes(blockNum) return tx.Put(p.lastBlockTable, lastBlokcKey, blockNumBytes) } diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go index c1535d70..7d337d08 100644 --- a/bridgesync/processor_test.go +++ b/bridgesync/processor_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree/testvectors" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" ) @@ -391,7 +392,7 @@ func (a *reorgAction) desc() string { } func (a *reorgAction) execute(t *testing.T) { - actualErr := a.p.Reorg(a.firstReorgedBlock) + actualErr := a.p.Reorg(context.Background(), a.firstReorgedBlock) require.Equal(t, a.expectedErr, actualErr) } @@ -413,7 +414,7 @@ func (a *processBlockAction) desc() string { } func (a *processBlockAction) execute(t *testing.T) { - actualErr := a.p.ProcessBlock(a.block) + actualErr := a.p.ProcessBlock(context.Background(), a.block) require.Equal(t, a.expectedErr, actualErr) } @@ -425,23 +426,11 @@ func eventsToBridgeEvents(events []interface{}) []Event { return bridgeEvents } -// DepositVectorRaw represents the deposit vector -type DepositVectorRaw struct { - OriginalNetwork uint32 `json:"originNetwork"` - TokenAddress string `json:"tokenAddress"` - Amount string `json:"amount"` - DestinationNetwork uint32 `json:"destinationNetwork"` - DestinationAddress string `json:"destinationAddress"` - ExpectedHash string `json:"leafValue"` - CurrentHash string `json:"currentLeafValue"` - Metadata string `json:"metadata"` -} - func TestHashBridge(t *testing.T) { - data, err := os.ReadFile("testvectors/leaf-vectors.json") + data, err := os.ReadFile("../tree/testvectors/leaf-vectors.json") require.NoError(t, err) - var leafVectors []DepositVectorRaw + var leafVectors []testvectors.DepositVectorRaw err = json.Unmarshal(data, &leafVectors) require.NoError(t, err) diff --git a/bridgesync/tree.go b/bridgesync/tree.go deleted file mode 100644 index 9972af64..00000000 --- a/bridgesync/tree.go +++ /dev/null @@ -1,325 +0,0 @@ -package bridgesync - -import ( - "context" - "fmt" - "math" - - dbCommon "github.com/0xPolygon/cdk/common" - "github.com/ethereum/go-ethereum/common" - "github.com/ledgerwatch/erigon-lib/kv" - "golang.org/x/crypto/sha3" -) - -const ( - defaultHeight uint8 = 32 -) - -type tree struct { - db kv.RwDB - rhtTable string - rootTable string - height uint8 - lastDepositCount int64 - lastLeftCache 
[]common.Hash - zeroHashes []common.Hash -} - -type treeNode struct { - left common.Hash - right common.Hash -} - -func (n *treeNode) hash() common.Hash { - var hash common.Hash - hasher := sha3.NewLegacyKeccak256() - hasher.Write(n.left[:]) - hasher.Write(n.right[:]) - copy(hash[:], hasher.Sum(nil)) - return hash -} - -func (n *treeNode) MarshalBinary() ([]byte, error) { - return append(n.left[:], n.right[:]...), nil -} - -func (n *treeNode) UnmarshalBinary(data []byte) error { - if len(data) != 64 { - return fmt.Errorf("expected len %d, actual len %d", 64, len(data)) - } - n.left = common.Hash(data[:32]) - n.right = common.Hash(data[32:]) - return nil -} - -func newTree(ctx context.Context, rhtTable, rootTable string, db kv.RwDB) (*tree, error) { - t := &tree{ - rhtTable: rhtTable, - rootTable: rootTable, - db: db, - height: defaultHeight, - zeroHashes: generateZeroHashes(defaultHeight), - } - - if err := t.initLastLeftCacheAndLastDepositCount(ctx); err != nil { - return nil, err - } - - return t, nil -} - -// getProof returns the merkle proof for a given deposit count and root. -func (t *tree) getProof(ctx context.Context, depositCount uint32, root common.Hash) ([]common.Hash, error) { - tx, err := t.db.BeginRw(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - siblings := make([]common.Hash, int(t.height)) - - currentNodeHash := root - // It starts in height-1 because 0 is the level of the leafs - for h := int(t.height - 1); h >= 0; h-- { - currentNode, err := t.getRHTNode(tx, currentNodeHash) - if err != nil { - return nil, fmt.Errorf( - "height: %d, currentNode: %s, error: %v", - h, currentNodeHash.Hex(), err, - ) - } - /* - * Root (level h=3 => height=4) - * / \ - * O5 O6 (level h=2) - * / \ / \ - * O1 O2 O3 O4 (level h=1) - * /\ /\ /\ /\ - * 0 1 2 3 4 5 6 7 Leafs (level h=0) - * Example 1: - * Choose index = 3 => 011 binary - * Assuming we are in level 1 => h=1; 1<<h = 010 binary => 011&010=010 which is higher than 0 so we need the left sibling (O1) - * Example 2: - * Choose index = 4 => 100 binary - * Assuming we are in level 1 => h=1; 1<<h = 010 binary => 100&010=000 which is not higher than 0 so we need the right sibling (O4) - * Example 3: - * Choose index = 4 => 100 binary - * Assuming we are in level 2 => h=2; 1<<h = 100 binary => 100&100=100 which is higher than 0 so we need the left sibling (O5) - */ - if depositCount&(1<<h) > 0 { - siblings = append(siblings, currentNode.left) - currentNodeHash = currentNode.right - } else { - siblings = append(siblings, currentNode.right) - currentNodeHash = currentNode.left - } - } - - // Reverse siblings to go from leafs to root - for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { - siblings[i], siblings[j] = siblings[j], siblings[i] - } - - return siblings, nil -} - -func (t *tree) addLeaf(tx kv.RwTx, depositCount uint32, hash common.Hash) error { - // Sanity check - if int64(depositCount) != t.lastDepositCount+1 { - return fmt.Errorf( - "mismatched index. Expected: %d, actual: %d", - t.lastDepositCount+1, depositCount, - ) - } - - // Calculate new tree nodes - currentChildHash := hash - newNodes := []treeNode{} - for h := uint8(0); h < t.height; h++ { - var parent treeNode - if depositCount&(1<<h) > 0 { - // Add child to the right - parent = treeNode{ - left: t.lastLeftCache[h], - right: currentChildHash, - } - } else { - // Add child to the left - parent = treeNode{ - left: currentChildHash, - right: t.zeroHashes[h], - } - // Update cache - // TODO: review this part of the logic, skipping ?optimizaton? 
- // from OG implementation - t.lastLeftCache[h] = currentChildHash - } - currentChildHash = parent.hash() - newNodes = append(newNodes, parent) - } - - // store root - root := currentChildHash - if err := tx.Put(t.rootTable, dbCommon.Uint32ToBytes(depositCount), root[:]); err != nil { - return err - } - - // store nodes - for _, node := range newNodes { - value, err := node.MarshalBinary() - if err != nil { - return err - } - if err := tx.Put(t.rhtTable, node.hash().Bytes(), value); err != nil { - return err - } - } - - t.lastDepositCount++ - return nil -} - -func (t *tree) initLastLeftCacheAndLastDepositCount(ctx context.Context) error { - tx, err := t.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - root, err := t.initLastDepositCount(tx) - if err != nil { - return err - } - return t.initLastLeftCache(tx, t.lastDepositCount, root) -} - -// getLastDepositCountAndRoot return the deposit count and the root associated to the last deposit. -// If deposit count == -1, it means no deposit added yet -func (t *tree) getLastDepositCountAndRoot(tx kv.Tx) (int64, common.Hash, error) { - iter, err := tx.RangeDescend( - t.rootTable, - dbCommon.Uint32ToBytes(math.MaxUint32), - dbCommon.Uint32ToBytes(0), - 1, - ) - if err != nil { - return 0, common.Hash{}, err - } - - lastDepositCountBytes, rootBytes, err := iter.Next() - if err != nil { - return 0, common.Hash{}, err - } - if lastDepositCountBytes == nil { - return -1, common.Hash{}, nil - } - return int64(dbCommon.BytesToUint32(lastDepositCountBytes)), common.Hash(rootBytes), nil -} - -func (t *tree) initLastDepositCount(tx kv.Tx) (common.Hash, error) { - ldc, root, err := t.getLastDepositCountAndRoot(tx) - if err != nil { - return common.Hash{}, err - } - t.lastDepositCount = ldc - return root, nil -} - -func (t *tree) initLastLeftCache(tx kv.Tx, lastDepositCount int64, lastRoot common.Hash) error { - siblings := make([]common.Hash, t.height, t.height) - if lastDepositCount == -1 { - t.lastLeftCache = siblings - return nil - } - index := lastDepositCount - - currentNodeHash := lastRoot - // It starts in height-1 because 0 is the level of the leafs - for h := int(t.height - 1); h >= 0; h-- { - currentNode, err := t.getRHTNode(tx, currentNodeHash) - if err != nil { - return fmt.Errorf( - "error getting node %s from the RHT at height %d with root %s: %v", - currentNodeHash.Hex(), h, lastRoot.Hex(), err, - ) - } - if currentNode == nil { - return ErrNotFound - } - siblings = append(siblings, currentNode.left) - if index&(1<<h) > 0 { - currentNodeHash = currentNode.right - } else { - currentNodeHash = currentNode.left - } - } - - // Reverse the siblings to go from leafs to root - for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { - siblings[i], siblings[j] = siblings[j], siblings[i] - } - - t.lastLeftCache = siblings - return nil -} - -func (t *tree) getRHTNode(tx kv.Tx, nodeHash common.Hash) (*treeNode, error) { - nodeBytes, err := tx.GetOne(t.rhtTable, nodeHash[:]) - if err != nil { - return nil, err - } - if nodeBytes == nil { - return nil, ErrNotFound - } - node := &treeNode{} - err = node.UnmarshalBinary(nodeBytes) - return node, err -} - -func (t *tree) reorg(tx kv.RwTx, lastValidDepositCount uint32) error { - if t.lastDepositCount == -1 { - return nil - } - // Clean root table - for i := lastValidDepositCount + 1; i <= uint32(t.lastDepositCount); i++ { - if err := tx.Delete(t.rootTable, dbCommon.Uint32ToBytes(i)); err != nil { - return err - } - } - - // Reset cache - rootBytes, err := tx.GetOne(t.rootTable, 
dbCommon.Uint32ToBytes(lastValidDepositCount)) - if err != nil { - return err - } - if rootBytes == nil { - return ErrNotFound - } - err = t.initLastLeftCache(tx, int64(lastValidDepositCount), common.Hash(rootBytes)) // 0x619a9fedbe029225288d32e39e06fb868ed0d8f20db26047cf0ef8d3582b5f6e - if err != nil { - return err - } - - // Note: not cleaning RHT, not worth it - t.lastDepositCount = int64(lastValidDepositCount) - return nil -} - -func generateZeroHashes(height uint8) []common.Hash { - var zeroHashes = []common.Hash{ - {}, - } - // This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels, - // we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes. - for i := 1; i <= int(height); i++ { - hasher := sha3.NewLegacyKeccak256() - hasher.Write(zeroHashes[i-1][:]) - hasher.Write(zeroHashes[i-1][:]) - thisHeightHash := common.Hash{} - copy(thisHeightHash[:], hasher.Sum(nil)) - zeroHashes = append(zeroHashes, thisHeightHash) - } - return zeroHashes -} diff --git a/bridgesync/tree_test.go b/bridgesync/tree_test.go deleted file mode 100644 index 5624ad47..00000000 --- a/bridgesync/tree_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package bridgesync - -import ( - "context" - "encoding/json" - "fmt" - "math/big" - "os" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -// MTRootVectorRaw represents the root of Merkle Tree -type MTRootVectorRaw struct { - ExistingLeaves []string `json:"previousLeafsValues"` - CurrentRoot string `json:"currentRoot"` - NewLeaf DepositVectorRaw `json:"newLeaf"` - NewRoot string `json:"newRoot"` -} - -func TestMTAddLeaf(t *testing.T) { - data, err := os.ReadFile("testvectors/root-vectors.json") - require.NoError(t, err) - - var mtTestVectors []MTRootVectorRaw - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - ctx := context.Background() - - for ti, testVector := range mtTestVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - - path := t.TempDir() - p, err := newProcessor(context.Background(), path, "foo") - require.NoError(t, err) - - // Add exisiting leaves - for i, leaf := range testVector.ExistingLeaves { - tx, err := p.db.BeginRw(ctx) - require.NoError(t, err) - err = p.tree.addLeaf(tx, uint32(i), common.HexToHash(leaf)) - require.NoError(t, err) - err = tx.Commit() - require.NoError(t, err) - } - if len(testVector.ExistingLeaves) > 0 { - txRo, err := p.db.BeginRo(ctx) - require.NoError(t, err) - _, actualRoot, err := p.tree.getLastDepositCountAndRoot(txRo) - txRo.Rollback() - require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.CurrentRoot), actualRoot) - } - - // Add new bridge - amount, result := big.NewInt(0).SetString(testVector.NewLeaf.Amount, 0) - require.True(t, result) - bridge := Bridge{ - OriginNetwork: testVector.NewLeaf.OriginalNetwork, - OriginAddress: common.HexToAddress(testVector.NewLeaf.TokenAddress), - Amount: amount, - DestinationNetwork: testVector.NewLeaf.DestinationNetwork, - DestinationAddress: common.HexToAddress(testVector.NewLeaf.DestinationAddress), - DepositCount: uint32(len(testVector.ExistingLeaves)), - Metadata: common.FromHex(testVector.NewLeaf.Metadata), - } - tx, err := p.db.BeginRw(ctx) - require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.NewLeaf.CurrentHash), bridge.Hash()) - err = p.tree.addLeaf(tx, bridge.DepositCount, bridge.Hash()) - require.NoError(t, err) - err = tx.Commit() - 
txRo, err := p.db.BeginRo(ctx) - require.NoError(t, err) - _, actualRoot, err := p.tree.getLastDepositCountAndRoot(txRo) - txRo.Rollback() - require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.NewRoot), actualRoot) - }) - } -} - -// MTClaimVectorRaw represents the merkle proof -type MTClaimVectorRaw struct { - Deposits []DepositVectorRaw `json:"leafs"` - Index uint32 `json:"index"` - MerkleProof []string `json:"proof"` - ExpectedRoot string `json:"root"` -} - -func TestMTGetProof(t *testing.T) { - data, err := os.ReadFile("testvectors/claim-vectors.json") - require.NoError(t, err) - - var mtTestVectors []MTClaimVectorRaw - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - ctx := context.Background() - - for ti, testVector := range mtTestVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - path := t.TempDir() - p, err := newProcessor(context.Background(), path, "foo") - require.NoError(t, err) - - for li, leaf := range testVector.Deposits { - amount, result := big.NewInt(0).SetString(leaf.Amount, 0) - require.True(t, result) - bridge := &Bridge{ - OriginNetwork: leaf.OriginalNetwork, - OriginAddress: common.HexToAddress(leaf.TokenAddress), - Amount: amount, - DestinationNetwork: leaf.DestinationNetwork, - DestinationAddress: common.HexToAddress(leaf.DestinationAddress), - DepositCount: uint32(li), - Metadata: common.FromHex(leaf.Metadata), - } - tx, err := p.db.BeginRw(ctx) - require.NoError(t, err) - err = p.tree.addLeaf(tx, bridge.DepositCount, bridge.Hash()) - require.NoError(t, err) - err = tx.Commit() - require.NoError(t, err) - } - txRo, err := p.db.BeginRo(ctx) - require.NoError(t, err) - _, actualRoot, err := p.tree.getLastDepositCountAndRoot(txRo) - txRo.Rollback() - expectedRoot := common.HexToHash(testVector.ExpectedRoot) - require.Equal(t, expectedRoot, actualRoot) - - proof, err := p.tree.getProof(ctx, testVector.Index, expectedRoot) - require.NoError(t, err) - for i, sibling := range testVector.MerkleProof { - require.Equal(t, common.HexToHash(sibling), proof[i]) - } - }) - } -} diff --git a/cmd/run.go b/cmd/run.go index 4bb24fc4..ef228f5e 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -382,6 +382,7 @@ func newL1InfoTreeSyncer( ctx, cfg.L1InfoTreeSync.DBPath, cfg.L1InfoTreeSync.GlobalExitRootAddr, + cfg.L1InfoTreeSync.RollupManagerAddr, cfg.L1InfoTreeSync.SyncBlockChunkSize, etherman.BlockNumberFinality(cfg.L1InfoTreeSync.BlockFinality), reorgDetector, diff --git a/common/common.go b/common/common.go index d2f440d1..ebbafd69 100644 --- a/common/common.go +++ b/common/common.go @@ -9,27 +9,27 @@ import ( "github.com/iden3/go-iden3-crypto/keccak256" ) -// Uint64To2Bytes converts a block number to a byte slice -func Uint64To2Bytes(num uint64) []byte { +// Uint64ToBytes converts a uint64 to a byte slice +func Uint64ToBytes(num uint64) []byte { bytes := make([]byte, 8) binary.LittleEndian.PutUint64(bytes, num) return bytes } -// BytesToUint64 converts a byte slice to a block number +// BytesToUint64 converts a byte slice to a uint64 func BytesToUint64(bytes []byte) uint64 { return binary.LittleEndian.Uint64(bytes) } -// Uint32To2Bytes converts a block number to a byte slice +// Uint32ToBytes converts a uint32 to a byte slice func Uint32ToBytes(num uint32) []byte { key := make([]byte, 4) binary.LittleEndian.PutUint32(key, num) return key }
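An aside on the helpers above, since they build mdbx keys for cursor calls such as Seek and RangeDescend elsewhere in this change: byte-wise key ordering does not match numeric order for little-endian encodings, whereas big-endian keys sort numerically. A standalone sketch of the difference (illustrative only; this change keeps the existing little-endian encoding):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	le := func(n uint64) []byte { b := make([]byte, 8); binary.LittleEndian.PutUint64(b, n); return b }
	be := func(n uint64) []byte { b := make([]byte, 8); binary.BigEndian.PutUint64(b, n); return b }

	// 256 > 1 numerically, but its little-endian key compares as smaller.
	fmt.Println(bytes.Compare(le(256), le(1))) // -1: byte order disagrees with numeric order
	fmt.Println(bytes.Compare(be(256), be(1))) // 1: byte order matches numeric order
}

-// BytesToUint32 converts a byte slice to a block number +// BytesToUint32 converts a byte slice to a uint32 func BytesToUint32(bytes []byte) uint32 { return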
binary.LittleEndian.Uint32(bytes) } diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go index 4673a4b5..2d9d7589 100644 --- a/dataavailability/datacommittee/datacommittee_test.go +++ b/dataavailability/datacommittee/datacommittee_test.go @@ -1,11 +1,14 @@ package datacommittee import ( + "errors" "math/big" "testing" + smcparis "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana-paris/polygondatacommittee" "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygondatacommittee" "github.com/0xPolygon/cdk/log" + erc1967proxy "github.com/0xPolygon/cdk/test/contracts/erc1967proxy" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -16,7 +19,7 @@ import ( ) func TestUpdateDataCommitteeEvent(t *testing.T) { - t.Skip("This test is not working because the simulated backend doesnt accept PUSH0, check: https://github.com/ethereum/go-ethereum/issues/28144#issuecomment-2247124776") + //t.Skip("This test is not working because the simulated backend doesnt accept PUSH0, check: https://github.com/ethereum/go-ethereum/issues/28144#issuecomment-2247124776") // Set up testing environment dac, ethBackend, auth, da := newTestingEnv(t) @@ -109,16 +112,22 @@ func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( client := simulated.NewBackend(genesisAlloc, simulated.WithBlockGasLimit(blockGasLimit)) // DAC Setup - _, _, da, err = polygondatacommittee.DeployPolygondatacommittee(auth, client.Client()) + addr, _, _, err := smcparis.DeployPolygondatacommittee(auth, client.Client()) if err != nil { return &Backend{}, nil, nil, err } client.Commit() - _, err = da.Initialize(auth) + proxyAddr, err := deployDACProxy(auth, client.Client(), addr) if err != nil { return &Backend{}, nil, nil, err } + client.Commit() + da, err = polygondatacommittee.NewPolygondatacommittee(proxyAddr, client.Client()) + if err != nil { + return &Backend{}, nil, nil, err + } + _, err = da.SetupCommittee(auth, big.NewInt(0), []string{}, []byte{}) if err != nil { return &Backend{}, nil, nil, err @@ -130,3 +139,42 @@ func newSimulatedDacman(t *testing.T, auth *bind.TransactOpts) ( } return c, client, da, nil } + +func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImpl common.Address) (common.Address, error) { + // Deploy proxy + dacABI, err := polygondatacommittee.PolygondatacommitteeMetaData.GetAbi() + if err != nil { + return common.Address{}, err + } + if dacABI == nil { + return common.Address{}, errors.New("GetABI returned nil") + } + initializeCallData, err := dacABI.Pack("initialize") + if err != nil { + return common.Address{}, err + } + proxyAddr, err := deployProxy( + auth, + client, + dacImpl, + initializeCallData, + ) + if err != nil { + return common.Address{}, err + } + log.Debug("DAC proxy deployed at ", proxyAddr) + return proxyAddr, nil +} + +func deployProxy(auth *bind.TransactOpts, + client bind.ContractBackend, + implementationAddr common.Address, + initializeParams []byte) (common.Address, error) { + addr, _, _, err := erc1967proxy.DeployErc1967proxy( + auth, + client, + implementationAddr, + initializeParams, + ) + return addr, err +}
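For context on the proxy deployment above: the ERC1967 constructor receives the implementation address plus the encoded initialize calldata, so deployment and initialization happen in a single transaction. If a test ever needs to verify the wiring, the implementation address can be read back from the EIP-1967 slot; a hedged sketch (the helper name is hypothetical, not part of this change):

// implementationOf reads the EIP-1967 implementation slot of a proxy.
// The slot constant is keccak256("eip1967.proxy.implementation") - 1,
// the same 0x360894a1... value visible in the proxy bytecode below.
// Requires "context", "github.com/ethereum/go-ethereum" and
// "github.com/ethereum/go-ethereum/common"; any ChainStateReader works,
// including the simulated backend's client.
func implementationOf(ctx context.Context, client ethereum.ChainStateReader, proxy common.Address) (common.Address, error) {
	implSlot := common.HexToHash("0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc")
	raw, err := client.StorageAt(ctx, proxy, implSlot, nil)
	if err != nil {
		return common.Address{}, err
	}
	return common.BytesToAddress(raw), nil
}

diff --git a/go.mod b/go.mod index d40eaa56..5b2fe026 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/0xPolygon/cdk go 1.22.4 require ( - github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726101945-d05a885ae884 +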
github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726125827-301fa4c59245 github.com/0xPolygon/cdk-data-availability v0.0.8 github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d github.com/0xPolygonHermez/zkevm-data-streamer v0.2.3-RC4 diff --git a/go.sum b/go.sum index 6b5b219a..a1c78e52 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726101945-d05a885ae884 h1:oXUct6UWuGs15WyCEKipY0Kc0BsCnMzniAz0EIFoPxs= -github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726101945-d05a885ae884/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= +github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726125827-301fa4c59245 h1:BBmVd50JQID9UyUR3vWFMKr2pMHD3mrqjpuB9DDepBw= +github.com/0xPolygon/cdk-contracts-tooling v0.0.0-20240726125827-301fa4c59245/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= github.com/0xPolygon/cdk-data-availability v0.0.8 h1:bMmOYZ7Ei683y80ric3KzMPXtRGmchAmfjIRzghaHb4= github.com/0xPolygon/cdk-data-availability v0.0.8/go.mod h1:3XkZ0zn0GsvAT01MPQMmukF534CVSFmtrcoK3F/BK6Q= github.com/0xPolygon/cdk-rpc v0.0.0-20240419104226-c0a62ba0f49d h1:sxh6hZ2jF/sxxj2jd5o1vuNNCZjYmn4aRG9SRlVaEFs= diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go index 255395dd..dbe6950a 100644 --- a/l1infotreesync/downloader.go +++ b/l1infotreesync/downloader.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonzkevmglobalexitrootv2" + "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonrollupmanager" "github.com/0xPolygon/cdk/sync" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -13,7 +14,9 @@ import ( ) var ( - updateL1InfoTreeSignature = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")) + updateL1InfoTreeSignature = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")) + verifyBatchesSignature = crypto.Keccak256Hash([]byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)")) + verifyBatchesTrustedAggregatorSignature = crypto.Keccak256Hash([]byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)")) ) type EthClienter interface { @@ -23,27 +26,68 @@ type EthClienter interface { bind.ContractBackend } -func buildAppender(client EthClienter, globalExitRoot common.Address) (sync.LogAppenderMap, error) { - contract, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) +func buildAppender(client EthClienter, globalExitRoot, rollupManager common.Address) (sync.LogAppenderMap, error) { + ger, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) + if err != nil { + return nil, err + } + rm, err := polygonrollupmanager.NewPolygonrollupmanager(rollupManager, client) if err != nil { return nil, err } appender := make(sync.LogAppenderMap) appender[updateL1InfoTreeSignature] = func(b *sync.EVMBlock, l types.Log) error { - l1InfoTreeUpdate, err := contract.ParseUpdateL1InfoTree(l) + l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTree(l) if err != nil { return fmt.Errorf( - "error parsing log %+v using contract.ParseUpdateL1InfoTree: %v", + "error parsing log %+v using ger.ParseUpdateL1InfoTree: %v", l, err, ) } - b.Events = append(b.Events, Event{ + b.Events = append(b.Events, Event{UpdateL1InfoTree: &UpdateL1InfoTree{ MainnetExitRoot: l1InfoTreeUpdate.MainnetExitRoot, RollupExitRoot: l1InfoTreeUpdate.RollupExitRoot, ParentHash: b.ParentHash, Timestamp: b.Timestamp, - }) + }}) + return 
nil + } + appender[verifyBatchesSignature] = func(b *sync.EVMBlock, l types.Log) error { + verifyBatches, err := rm.ParseVerifyBatches(l) + if err != nil { + return fmt.Errorf( + "error parsing log %+v using rm.ParseVerifyBatches: %v", + l, err, + ) + } + b.Events = append(b.Events, Event{VerifyBatches: &VerifyBatches{ + RollupID: verifyBatches.RollupID, + NumBatch: verifyBatches.NumBatch, + StateRoot: verifyBatches.StateRoot, + ExitRoot: verifyBatches.ExitRoot, + Aggregator: verifyBatches.Aggregator, + }}) return nil } + appender[verifyBatchesTrustedAggregatorSignature] = func(b *sync.EVMBlock, l types.Log) error { + verifyBatches, err := rm.ParseVerifyBatchesTrustedAggregator(l) + if err != nil { + return fmt.Errorf( + "error parsing log %+v using rm.ParseVerifyBatchesTrustedAggregator: %v", + l, err, + ) + } + b.Events = append(b.Events, Event{VerifyBatches: &VerifyBatches{ + RollupID: verifyBatches.RollupID, + NumBatch: verifyBatches.NumBatch, + StateRoot: verifyBatches.StateRoot, + ExitRoot: verifyBatches.ExitRoot, + Aggregator: verifyBatches.Aggregator, + }}) + return nil + } + return appender, nil } diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go index 35958f08..92762150 100644 --- a/l1infotreesync/e2e_test.go +++ b/l1infotreesync/e2e_test.go @@ -54,7 +54,7 @@ func TestE2E(t *testing.T) { rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) client, gerAddr, gerSc, err := newSimulatedClient(auth) require.NoError(t, err) - syncer, err := New(ctx, dbPath, gerAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0) + syncer, err := New(ctx, dbPath, gerAddr, common.Address{}, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0) require.NoError(t, err) go syncer.Start(ctx) diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go index ed9056bc..67342613 100644 --- a/l1infotreesync/l1infotreesync.go +++ b/l1infotreesync/l1infotreesync.go @@ -23,6 +23,7 @@ var ( type Config struct { DBPath string `mapstructure:"DBPath"` GlobalExitRootAddr common.Address `mapstructure:"GlobalExitRootAddr"` + RollupManagerAddr common.Address `mapstructure:"RollupManagerAddr"` SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` // TODO: BlockFinality doesnt work as per the jsonschema BlockFinality string `jsonschema:"enum=latest,enum=safe, enum=pending, enum=finalized" mapstructure:"BlockFinality"` @@ -39,7 +40,7 @@ type L1InfoTreeSync struct { func New( ctx context.Context, dbPath string, - globalExitRoot common.Address, + globalExitRoot, rollupManager common.Address, syncBlockChunkSize uint64, blockFinalityType etherman.BlockNumberFinality, rd sync.ReorgDetector, @@ -57,7 +58,7 @@ func New( return nil, err } if lastProcessedBlock < initialBlock { - err = processor.ProcessBlock(sync.Block{ + err = processor.ProcessBlock(ctx, sync.Block{ Num: initialBlock, }) if err != nil { @@ -65,7 +66,7 @@ } } - appender, err := buildAppender(l1Client, globalExitRoot) + appender, err := buildAppender(l1Client, globalExitRoot, rollupManager) if err != nil { return nil, err } @@ -75,7 +76,7 @@ blockFinalityType, waitForNewBlocksPeriod, appender, - []common.Address{globalExitRoot}, + []common.Address{globalExitRoot, rollupManager}, ) if err != nil { return nil, err
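A quick aside on how these events are selected: the downloader matches logs by topic zero, which is the Keccak-256 hash of the canonical event signature, exactly how the appender map above is keyed. A standalone sketch of the derivation (signatures copied from the diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Topic 0 of an emitted log is the Keccak-256 hash of the canonical
	// event signature; the appender map is keyed by these hashes.
	updateL1InfoTree := crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)"))
	verifyBatches := crypto.Keccak256Hash([]byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)"))
	fmt.Println(updateL1InfoTree.Hex())
	fmt.Println(verifyBatches.Hex())
}

diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go index 2e6116c4..927eff82 100644 --- a/l1infotreesync/processor.go +++ b/l1infotreesync/processor.go @@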
-2,14 +2,15 @@ package l1infotreesync import ( "context" - "encoding/binary" "encoding/json" "errors" + "fmt" + "path" "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/l1infotree" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sync" + "github.com/0xPolygon/cdk/tree" ethCommon "github.com/ethereum/go-ethereum/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" @@ -17,11 +18,31 @@ import ( ) const ( - rootTable = "l1infotreesync-root" - indexTable = "l1infotreesync-index" - infoTable = "l1infotreesync-info" - blockTable = "l1infotreesync-block" - lastBlockTable = "l1infotreesync-lastBlock" + dbPrefix = "l1infotreesync" + // rootTable stores the L1 info tree roots + // Key: root (common.Hash) + // Value: hash of the leaf that caused the update (common.Hash) + rootTable = dbPrefix + "-root" + // indexTable stores the L1 info tree indexes + // Key: index (uint32 converted to bytes) + // Value: hash of the leaf that caused the update (common.Hash) + indexTable = dbPrefix + "-index" + // infoTable stores the information of the tree (the leaves). Note that the value + // of rootTable and indexTable references the key of the infoTable + // Key: hash of the leaf that caused the update (common.Hash) + // Value: JSON of storeLeaf struct + infoTable = dbPrefix + "-info" + // blockTable stores the first and last index of L1 Info Tree that have been updated on + // a block. This is useful in case there are blocks with multiple updates and a reorg is needed, + // or when querying by block number + // Key: block number (uint64 converted to bytes) + // Value: JSON of blockWithLeafs + blockTable = dbPrefix + "-block" + // lastBlockTable is used to store the last block processed. This is needed to know the last processed block + // when it doesn't have events that make other tables get populated + // Key: it's always lastBlockKey + // Value: block number (uint64 converted to bytes) + lastBlockTable = dbPrefix + "-lastBlock" treeHeight uint8 = 32 ) @@ -29,21 +50,36 @@ const ( var ( ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") ErrNotFound = errors.New("not found") - lastBlokcKey = []byte("lb") + ErrNoBlock0 = errors.New("blockNum must be greater than 0") + lastBlockKey = []byte("lb") ) type processor struct { - db kv.RwDB - tree *l1infotree.L1InfoTree + db kv.RwDB + l1InfoTree *l1infotree.L1InfoTree + rollupExitTree *tree.UpdatableTree } -type Event struct { +type UpdateL1InfoTree struct { MainnetExitRoot ethCommon.Hash RollupExitRoot ethCommon.Hash ParentHash ethCommon.Hash Timestamp uint64 } +type VerifyBatches struct { + RollupID uint32 + NumBatch uint64 + StateRoot ethCommon.Hash + ExitRoot ethCommon.Hash + Aggregator ethCommon.Address +} + +type Event struct { + UpdateL1InfoTree *UpdateL1InfoTree + VerifyBatches *VerifyBatches +} + type L1InfoTreeLeaf struct { L1InfoTreeRoot ethCommon.Hash L1InfoTreeIndex uint32 @@ -106,11 +142,17 @@ func newProcessor(ctx context.Context, dbPath string) (*processor, error) { if err != nil { return nil, err } - tree, err := l1infotree.NewL1InfoTree(treeHeight, leaves) + l1InfoTree, err := l1infotree.NewL1InfoTree(treeHeight, leaves) if err != nil { return nil, err } - p.tree = tree + p.l1InfoTree = l1InfoTree + rollupExitTreeDBPath := path.Join(dbPath, "rollupExitTree") + rollupExitTree, err := tree.NewUpdatable(ctx, rollupExitTreeDBPath, dbPrefix) + if err != nil { + return nil, err + } + p.rollupExitTree = rollupExitTree return p, nil }
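The documented layout chains lookups through the leaf hash: rootTable and indexTable both resolve to the hash that keys infoTable. A minimal sketch of a root-based lookup against that layout (illustrative; the helper name is hypothetical, and the package's tables, storeLeaf and ErrNotFound are assumed, as in GetInfoByRoot below):

// getInfoByRootSketch walks rootTable -> infoTable as documented above.
func getInfoByRootSketch(tx kv.Tx, root ethCommon.Hash) (*storeLeaf, error) {
	leafHash, err := tx.GetOne(rootTable, root[:])
	if err != nil {
		return nil, err
	}
	if leafHash == nil {
		return nil, ErrNotFound
	}
	infoBytes, err := tx.GetOne(infoTable, leafHash)
	if err != nil {
		return nil, err
	}
	if infoBytes == nil {
		return nil, ErrNotFound
	}
	info := &storeLeaf{}
	if err := json.Unmarshal(infoBytes, info); err != nil {
		return nil, err
	}
	return info, nil
}

@@ -130,7 +172,7 @@ func (p *processor)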
getAllLeavesHashed(ctx context.Context) ([][32]byte, error) return nil, err } - return p.getHasedLeaves(tx, index) + return p.getHashedLeaves(tx, index) } func (p *processor) ComputeMerkleProofByIndex(ctx context.Context, index uint32) ([][32]byte, ethCommon.Hash, error) { @@ -143,14 +185,14 @@ func (p *processor) ComputeMerkleProofByIndex(ctx context.Context, index uint32) } defer tx.Rollback() - leaves, err := p.getHasedLeaves(tx, index) + leaves, err := p.getHashedLeaves(tx, index) if err != nil { return nil, ethCommon.Hash{}, err } - return p.tree.ComputeMerkleProof(index, leaves) + return p.l1InfoTree.ComputeMerkleProof(index, leaves) } -func (p *processor) getHasedLeaves(tx kv.Tx, untilIndex uint32) ([][32]byte, error) { +func (p *processor) getHashedLeaves(tx kv.Tx, untilIndex uint32) ([][32]byte, error) { leaves := [][32]byte{} for i := uint32(0); i <= untilIndex; i++ { info, err := p.getInfoByIndexWithTx(tx, i) @@ -187,29 +229,37 @@ func (p *processor) GetInfoByRoot(ctx context.Context, root ethCommon.Hash) (*L1 return p.getInfoByHashWithTx(tx, hash) } -// GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occured before or at blockNum. +// GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. // If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { + if blockNum == 0 { + return nil, ErrNoBlock0 + } tx, err := p.db.BeginRo(ctx) if err != nil { return nil, err } defer tx.Rollback() lpb, err := p.getLastProcessedBlockWithTx(tx) + if err != nil { + return nil, err + } if lpb < blockNum { return nil, ErrBlockNotProcessed } - iter, err := tx.RangeDescend(blockTable, uint64ToBytes(blockNum), uint64ToBytes(0), 1) + iter, err := tx.RangeDescend(blockTable, common.Uint64ToBytes(blockNum), common.Uint64ToBytes(0), 1) if err != nil { - return nil, err - } - if !iter.HasNext() { - return nil, ErrNotFound + return nil, fmt.Errorf( + "error calling RangeDescend(blockTable, %d, 0, 1): %w", blockNum, err, + ) } - _, v, err := iter.Next() + k, v, err := iter.Next() if err != nil { return nil, err } + if k == nil { + return nil, ErrNotFound + } blk := blockWithLeafs{} if err := json.Unmarshal(v, &blk); err != nil { return nil, err @@ -285,18 +335,18 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { } func (p *processor) getLastProcessedBlockWithTx(tx kv.Tx) (uint64, error) { - if blockNumBytes, err := tx.GetOne(lastBlockTable, lastBlokcKey); err != nil { + blockNumBytes, err := tx.GetOne(lastBlockTable, lastBlockKey) + if err != nil { return 0, err } else if blockNumBytes == nil { return 0, nil - } else { - return bytes2Uint64(blockNumBytes), nil } + return common.BytesToUint64(blockNumBytes), nil } -func (p *processor) Reorg(firstReorgedBlock uint64) error { +func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { // TODO: Does tree need to be reorged? 
- tx, err := p.db.BeginRw(context.Background()) + tx, err := p.db.BeginRw(ctx) if err != nil { return err } @@ -305,7 +355,7 @@ func (p *processor) Reorg(firstReorgedBlock uint64) error { return err } defer c.Close() - firstKey := uint64ToBytes(firstReorgedBlock) + firstKey := common.Uint64ToBytes(firstReorgedBlock) for blkKey, blkValue, err := c.Seek(firstKey); blkKey != nil; blkKey, blkValue, err = c.Next() { if err != nil { tx.Rollback() @@ -371,8 +421,8 @@ func (p *processor) deleteLeaf(tx kv.RwTx, index uint32) error { // ProcessBlock process the leafs of the L1 info tree found on a block // this function can be called without leafs with the intention to track the last processed block -func (p *processor) ProcessBlock(b sync.Block) error { - tx, err := p.db.BeginRw(context.Background()) +func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error { + tx, err := p.db.BeginRw(ctx) if err != nil { return err } @@ -387,19 +437,40 @@ func (p *processor) ProcessBlock(b sync.Block) error { } else { initialIndex = lastIndex + 1 } + var nextExpectedRollupExitTreeRoot *ethCommon.Hash for i, e := range b.Events { event := e.(Event) - leafToStore := storeLeaf{ - Index: initialIndex + uint32(i), - MainnetExitRoot: event.MainnetExitRoot, - RollupExitRoot: event.RollupExitRoot, - ParentHash: event.ParentHash, - Timestamp: event.Timestamp, - BlockNumber: b.Num, + if event.UpdateL1InfoTree != nil { + leafToStore := storeLeaf{ + Index: initialIndex + uint32(i), + MainnetExitRoot: event.UpdateL1InfoTree.MainnetExitRoot, + RollupExitRoot: event.UpdateL1InfoTree.RollupExitRoot, + ParentHash: event.UpdateL1InfoTree.ParentHash, + Timestamp: event.UpdateL1InfoTree.Timestamp, + BlockNumber: b.Num, + } + if err := p.addLeaf(tx, leafToStore); err != nil { + tx.Rollback() + return err + } + nextExpectedRollupExitTreeRoot = &leafToStore.RollupExitRoot } - if err := p.addLeaf(tx, leafToStore); err != nil { - tx.Rollback() - return err + + if event.VerifyBatches != nil { + // before the verify batches event happens, the updateExitRoot event is emitted. + // Since the previous event includes the rollup exit root, it can be used to assert + // that the computation of the tree is correct. 
However, there are some execution paths + // on the contract that don't follow this (verifyBatches + pendingStateTimeout != 0) + if err := p.rollupExitTree.UpsertLeaf( + ctx, + event.VerifyBatches.RollupID, + event.VerifyBatches.ExitRoot, + nextExpectedRollupExitTreeRoot, + ); err != nil { + tx.Rollback() + return err + } + nextExpectedRollupExitTreeRoot = nil } } bwl := blockWithLeafs{ @@ -411,7 +482,7 @@ func (p *processor) ProcessBlock(b sync.Block) error { tx.Rollback() return err } - if err := tx.Put(blockTable, uint64ToBytes(b.Num), blockValue); err != nil { + if err := tx.Put(blockTable, common.Uint64ToBytes(b.Num), blockValue); err != nil { tx.Rollback() return err } @@ -420,7 +491,6 @@ func (p *processor) ProcessBlock(b sync.Block) error { tx.Rollback() return err } - log.Debugf("block %d processed with events: %+v", b.Num, b.Events) return tx.Commit() } @@ -432,7 +502,7 @@ func (p *processor) getLastIndex(tx kv.Tx) (uint32, error) { if bNum == 0 { return 0, nil } - iter, err := tx.RangeDescend(blockTable, uint64ToBytes(bNum), uint64ToBytes(0), 1) + iter, err := tx.RangeDescend(blockTable, common.Uint64ToBytes(bNum), common.Uint64ToBytes(0), 1) if err != nil { return 0, err } @@ -453,7 +523,7 @@ func (p *processor) getLastIndex(tx kv.Tx) (uint32, error) { func (p *processor) addLeaf(tx kv.RwTx, leaf storeLeaf) error { // Update tree hash := l1infotree.HashLeafData(leaf.GlobalExitRoot(), leaf.ParentHash, leaf.Timestamp) - root, err := p.tree.AddLeaf(leaf.Index, hash) + root, err := p.l1InfoTree.AddLeaf(leaf.Index, hash) if err != nil { return err } @@ -478,16 +548,6 @@ func (p *processor) addLeaf(tx kv.RwTx, leaf storeLeaf) error { } func (p *processor) updateLastProcessedBlock(tx kv.RwTx, blockNum uint64) error { - blockNumBytes := uint64ToBytes(blockNum) - return tx.Put(lastBlockTable, lastBlokcKey, blockNumBytes) -} - -func uint64ToBytes(num uint64) []byte { - key := make([]byte, 8) - binary.LittleEndian.PutUint64(key, num) - return key -} - -func bytes2Uint64(key []byte) uint64 { - return binary.LittleEndian.Uint64(key) + blockNumBytes := common.Uint64ToBytes(blockNum) + return tx.Put(lastBlockTable, lastBlockKey, blockNumBytes) } diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go index e6d9dc8d..9a5efd5f 100644 --- a/sync/evmdownloader.go +++ b/sync/evmdownloader.go @@ -46,9 +46,9 @@ func NewEVMDownloader( if err != nil { return nil, err } - topicsToQuery := [][]common.Hash{} + topicsToQuery := []common.Hash{} for topic := range appender { - topicsToQuery = append(topicsToQuery, []common.Hash{topic}) + topicsToQuery = append(topicsToQuery, topic) } return &EVMDownloader{ syncBlockChunkSize: syncBlockChunkSize, @@ -57,7 +57,7 @@ func NewEVMDownloader( blockFinality: finality, waitForNewBlocksPeriod: waitForNewBlocksPeriod, appender: appender, - topicsToQuery: topicsToQuery, + topicsToQuery: [][]common.Hash{topicsToQuery}, adressessToQuery: adressessToQuery, }, }, nil @@ -90,7 +90,7 @@ func (d *EVMDownloader) download(ctx context.Context, fromBlock uint64, download } if len(blocks) == 0 || blocks[len(blocks)-1].Num < toBlock { // Indicate the last downloaded block if there are not events on it - log.Debugf("sending block %d to the driver (without evvents)", toBlock) + log.Debugf("sending block %d to the driver (without events)", toBlock) downloadedCh <- EVMBlock{ EVMBlockHeader: d.getBlockHeader(ctx, toBlock), } @@ -110,18 +110,25 @@ type downloaderImplementation struct { func (d *downloaderImplementation) waitForNewBlocks(ctx context.Context, lastBlockSeen uint64) 
(newLastBlock uint64) { attempts := 0 + ticker := time.NewTicker(d.waitForNewBlocksPeriod) + defer ticker.Stop() for { - header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality) - if err != nil { - attempts++ - log.Error("error geting last block num from eth client: ", err) - RetryHandler("waitForNewBlocks", attempts) - continue - } - if header.Number.Uint64() > lastBlockSeen { - return header.Number.Uint64() + select { + case <-ctx.Done(): + log.Info("context cancelled") + return lastBlockSeen + case <-ticker.C: + header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality) + if err != nil { + attempts++ + log.Error("error getting last block num from eth client: ", err) + RetryHandler("waitForNewBlocks", attempts) + continue + } + if header.Number.Uint64() > lastBlockSeen { + return header.Number.Uint64() + } } - time.Sleep(d.waitForNewBlocksPeriod) } } diff --git a/sync/evmdriver.go b/sync/evmdriver.go index a30b96d6..9eabe644 100644 --- a/sync/evmdriver.go +++ b/sync/evmdriver.go @@ -24,8 +24,8 @@ type EVMDriver struct { type processorInterface interface { GetLastProcessedBlock(ctx context.Context) (uint64, error) - ProcessBlock(block Block) error - Reorg(firstReorgedBlock uint64) error + ProcessBlock(ctx context.Context, block Block) error + Reorg(ctx context.Context, firstReorgedBlock uint64) error } type ReorgDetector interface { @@ -85,7 +85,7 @@ reset: d.handleNewBlock(ctx, b) case firstReorgedBlock := <-d.reorgSub.FirstReorgedBlock: log.Debug("handleReorg") - d.handleReorg(cancel, downloadCh, firstReorgedBlock) + d.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) goto reset } } @@ -109,7 +109,7 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { Num: b.Num, Events: b.Events, } - err := d.processor.ProcessBlock(blockToProcess) + err := d.processor.ProcessBlock(ctx, blockToProcess) if err != nil { attempts++ log.Errorf("error processing events for blcok %d, err: ", b.Num, err) @@ -121,7 +121,7 @@ func (d *EVMDriver) handleNewBlock(ctx context.Context, b EVMBlock) { } func (d *EVMDriver) handleReorg( - cancel context.CancelFunc, downloadCh chan EVMBlock, firstReorgedBlock uint64, + ctx context.Context, cancel context.CancelFunc, downloadCh chan EVMBlock, firstReorgedBlock uint64, ) { // stop downloader cancel() @@ -132,7 +132,7 @@ func (d *EVMDriver) handleReorg( // handle reorg attempts := 0 for { - err := d.processor.Reorg(firstReorgedBlock) + err := d.processor.Reorg(ctx, firstReorgedBlock) if err != nil { attempts++ log.Errorf( diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go index 502722f6..a09b94a8 100644 --- a/sync/evmdriver_test.go +++ b/sync/evmdriver_test.go @@ -78,18 +78,18 @@ func TestSync(t *testing.T) { Return(uint64(3), nil) rdm.On("AddBlockToTrack", ctx, reorgDetectorID, expectedBlock1.Num, expectedBlock1.Hash). Return(nil) - pm.On("ProcessBlock", Block{Num: expectedBlock1.Num, Events: expectedBlock1.Events}). + pm.On("ProcessBlock", ctx, Block{Num: expectedBlock1.Num, Events: expectedBlock1.Events}). Return(nil) rdm.On("AddBlockToTrack", ctx, reorgDetectorID, expectedBlock2.Num, expectedBlock2.Hash). Return(nil) - pm.On("ProcessBlock", Block{Num: expectedBlock2.Num, Events: expectedBlock2.Events}). + pm.On("ProcessBlock", ctx, Block{Num: expectedBlock2.Num, Events: expectedBlock2.Events}). 
Return(nil) go driver.Sync(ctx) time.Sleep(time.Millisecond * 200) // time to download expectedBlock1 // Trigger reorg 1 reorgedBlock1 := uint64(5) - pm.On("Reorg", reorgedBlock1).Return(nil) + pm.On("Reorg", ctx, reorgedBlock1).Return(nil) firstReorgedBlock <- reorgedBlock1 ok := <-reorgProcessed require.True(t, ok) @@ -100,7 +100,7 @@ func TestSync(t *testing.T) { // Trigger reorg 2: syncer restarts the porcess reorgedBlock2 := uint64(7) - pm.On("Reorg", reorgedBlock2).Return(nil) + pm.On("Reorg", ctx, reorgedBlock2).Return(nil) firstReorgedBlock <- reorgedBlock2 ok = <-reorgProcessed require.True(t, ok) @@ -126,7 +126,7 @@ func TestHandleNewBlock(t *testing.T) { rdm. On("AddBlockToTrack", ctx, reorgDetectorID, b1.Num, b1.Hash). Return(nil) - pm.On("ProcessBlock", Block{Num: b1.Num, Events: b1.Events}). + pm.On("ProcessBlock", ctx, Block{Num: b1.Num, Events: b1.Events}). Return(nil) driver.handleNewBlock(ctx, b1) @@ -143,7 +143,7 @@ func TestHandleNewBlock(t *testing.T) { rdm. On("AddBlockToTrack", ctx, reorgDetectorID, b2.Num, b2.Hash). Return(nil).Once() - pm.On("ProcessBlock", Block{Num: b2.Num, Events: b2.Events}). + pm.On("ProcessBlock", ctx, Block{Num: b2.Num, Events: b2.Events}). Return(nil) driver.handleNewBlock(ctx, b2) @@ -157,9 +157,9 @@ func TestHandleNewBlock(t *testing.T) { rdm. On("AddBlockToTrack", ctx, reorgDetectorID, b3.Num, b3.Hash). Return(nil) - pm.On("ProcessBlock", Block{Num: b3.Num, Events: b3.Events}). + pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events}). Return(errors.New("foo")).Once() - pm.On("ProcessBlock", Block{Num: b3.Num, Events: b3.Events}). + pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events}). Return(nil).Once() driver.handleNewBlock(ctx, b3) @@ -182,8 +182,8 @@ func TestHandleReorg(t *testing.T) { _, cancel := context.WithCancel(ctx) downloadCh := make(chan EVMBlock) firstReorgedBlock := uint64(5) - pm.On("Reorg", firstReorgedBlock).Return(nil) - go driver.handleReorg(cancel, downloadCh, firstReorgedBlock) + pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) + go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) close(downloadCh) done := <-reorgProcessed require.True(t, done) @@ -192,8 +192,8 @@ func TestHandleReorg(t *testing.T) { _, cancel = context.WithCancel(ctx) downloadCh = make(chan EVMBlock) firstReorgedBlock = uint64(6) - pm.On("Reorg", firstReorgedBlock).Return(nil) - go driver.handleReorg(cancel, downloadCh, firstReorgedBlock) + pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) + go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) downloadCh <- EVMBlock{} downloadCh <- EVMBlock{} downloadCh <- EVMBlock{} @@ -205,10 +205,10 @@ func TestHandleReorg(t *testing.T) { _, cancel = context.WithCancel(ctx) downloadCh = make(chan EVMBlock) firstReorgedBlock = uint64(7) - pm.On("Reorg", firstReorgedBlock).Return(errors.New("foo")).Once() - pm.On("Reorg", firstReorgedBlock).Return(errors.New("foo")).Once() - pm.On("Reorg", firstReorgedBlock).Return(nil).Once() - go driver.handleReorg(cancel, downloadCh, firstReorgedBlock) + pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() + pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() + pm.On("Reorg", ctx, firstReorgedBlock).Return(nil).Once() + go driver.handleReorg(ctx, cancel, downloadCh, firstReorgedBlock) close(downloadCh) done = <-reorgProcessed require.True(t, done) diff --git a/sync/mock_processor_test.go b/sync/mock_processor_test.go index d2c3e299..19738ef5 100644 --- a/sync/mock_processor_test.go +++ 
b/sync/mock_processor_test.go @@ -37,13 +37,13 @@ func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, err return r0, r1 } -// ProcessBlock provides a mock function with given fields: block -func (_m *ProcessorMock) ProcessBlock(block Block) error { - ret := _m.Called(block) +// ProcessBlock provides a mock function with given fields: ctx, block +func (_m *ProcessorMock) ProcessBlock(ctx context.Context, block Block) error { + ret := _m.Called(ctx, block) var r0 error - if rf, ok := ret.Get(0).(func(Block) error); ok { - r0 = rf(block) + if rf, ok := ret.Get(0).(func(context.Context, Block) error); ok { + r0 = rf(ctx, block) } else { r0 = ret.Error(0) } @@ -51,13 +51,13 @@ func (_m *ProcessorMock) ProcessBlock(block Block) error { return r0 } -// Reorg provides a mock function with given fields: firstReorgedBlock -func (_m *ProcessorMock) Reorg(firstReorgedBlock uint64) error { - ret := _m.Called(firstReorgedBlock) +// Reorg provides a mock function with given fields: ctx, firstReorgedBlock +func (_m *ProcessorMock) Reorg(ctx context.Context, firstReorgedBlock uint64) error { + ret := _m.Called(ctx, firstReorgedBlock) var r0 error - if rf, ok := ret.Get(0).(func(uint64) error); ok { - r0 = rf(firstReorgedBlock) + if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { + r0 = rf(ctx, firstReorgedBlock) } else { r0 = ret.Error(0) } diff --git a/test/contracts/abi/erc1967proxy.abi b/test/contracts/abi/erc1967proxy.abi new file mode 100644 index 00000000..f676814a --- /dev/null +++ b/test/contracts/abi/erc1967proxy.abi @@ -0,0 +1,71 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "_logic", + "type": "address" + }, + { + "internalType": "bytes", + "name": "_data", + "type": "bytes" + } + ], + "stateMutability": "payable", + "type": "constructor" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "address", + "name": "previousAdmin", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "newAdmin", + "type": "address" + } + ], + "name": "AdminChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "beacon", + "type": "address" + } + ], + "name": "BeaconUpgraded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "implementation", + "type": "address" + } + ], + "name": "Upgraded", + "type": "event" + }, + { + "stateMutability": "payable", + "type": "fallback" + }, + { + "stateMutability": "payable", + "type": "receive" + } + ] \ No newline at end of file diff --git a/test/contracts/bin/erc1967proxy.bin b/test/contracts/bin/erc1967proxy.bin new file mode 100644 index 00000000..d81a2e24 --- /dev/null +++ b/test/contracts/bin/erc1967proxy.bin @@ -0,0 +1 @@ 
+60806040526040516104ee3803806104ee833981016040819052610022916102de565b61002e82826000610035565b50506103fb565b61003e83610061565b60008251118061004b5750805b1561005c5761005a83836100a1565b505b505050565b61006a816100cd565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b60606100c683836040518060600160405280602781526020016104c760279139610180565b9392505050565b6001600160a01b0381163b61013f5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b60648201526084015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80546001600160a01b0319166001600160a01b0392909216919091179055565b6060600080856001600160a01b03168560405161019d91906103ac565b600060405180830381855af49150503d80600081146101d8576040519150601f19603f3d011682016040523d82523d6000602084013e6101dd565b606091505b5090925090506101ef868383876101f9565b9695505050505050565b60608315610268578251600003610261576001600160a01b0385163b6102615760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610136565b5081610272565b610272838361027a565b949350505050565b81511561028a5781518083602001fd5b8060405162461bcd60e51b815260040161013691906103c8565b634e487b7160e01b600052604160045260246000fd5b60005b838110156102d55781810151838201526020016102bd565b50506000910152565b600080604083850312156102f157600080fd5b82516001600160a01b038116811461030857600080fd5b60208401519092506001600160401b038082111561032557600080fd5b818501915085601f83011261033957600080fd5b81518181111561034b5761034b6102a4565b604051601f8201601f19908116603f01168101908382118183101715610373576103736102a4565b8160405282815288602084870101111561038c57600080fd5b61039d8360208301602088016102ba565b80955050505050509250929050565b600082516103be8184602087016102ba565b9190910192915050565b60208152600082518060208401526103e78160408501602087016102ba565b601f01601f19169190910160400192915050565b60be806104096000396000f3fe608060405236601057600e6013565b005b600e5b601f601b6021565b6065565b565b600060607f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b3660008037600080366000845af43d6000803e8080156083573d6000f35b3d6000fdfea2646970667358221220ffbfbaa210c1b5f5ca62a5eba67b7d993e0bdf919f51500f790fb7acf2fd784c64736f6c63430008140033416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564 \ No newline at end of file diff --git a/test/contracts/erc1967proxy/erc1967proxy.go b/test/contracts/erc1967proxy/erc1967proxy.go new file mode 100644 index 00000000..f4994e0d --- /dev/null +++ b/test/contracts/erc1967proxy/erc1967proxy.go @@ -0,0 +1,668 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package erc1967proxy + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// Erc1967proxyMetaData contains all meta data concerning the Erc1967proxy contract. +var Erc1967proxyMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_logic\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"stateMutability\":\"payable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"previousAdmin\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"newAdmin\",\"type\":\"address\"}],\"name\":\"AdminChanged\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"beacon\",\"type\":\"address\"}],\"name\":\"BeaconUpgraded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"implementation\",\"type\":\"address\"}],\"name\":\"Upgraded\",\"type\":\"event\"},{\"stateMutability\":\"payable\",\"type\":\"fallback\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", + Bin: "0x60806040526040516104ee3803806104ee833981016040819052610022916102de565b61002e82826000610035565b50506103fb565b61003e83610061565b60008251118061004b5750805b1561005c5761005a83836100a1565b505b505050565b61006a816100cd565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b60606100c683836040518060600160405280602781526020016104c760279139610180565b9392505050565b6001600160a01b0381163b61013f5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b60648201526084015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80546001600160a01b0319166001600160a01b0392909216919091179055565b6060600080856001600160a01b03168560405161019d91906103ac565b600060405180830381855af49150503d80600081146101d8576040519150601f19603f3d011682016040523d82523d6000602084013e6101dd565b606091505b5090925090506101ef868383876101f9565b9695505050505050565b60608315610268578251600003610261576001600160a01b0385163b6102615760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610136565b5081610272565b610272838361027a565b949350505050565b81511561028a5781518083602001fd5b8060405162461bcd60e51b815260040161013691906103c8565b634e487b7160e01b600052604160045260246000fd5b60005b838110156102d55781810151838201526020016102bd565b50506000910152565b600080604083850312156102f157600080fd5b82516001600160a01b038116811461030857600080fd5b60208401519092506001600160401b038082111561032557600080fd5b818501915085601f83011261033957600080fd5b81518181111561034b5761034b6102a4565b604051601f8201601f19908116603f01168101908382118183101715610373576103736102a4565b8160405282815288602084870101111561038c57600080fd5b61039d8360208301602088016102ba565b80955050505050509250929050565b600082516103be8184602087016102ba565b9190910192915050565b60208152600082518060208401526103e78160408501602087016102ba565b601f01601f19169190910160400192915050565b60be806104096000396000f3fe608060405236601057600e6013565b005b600e5b601f601b6021565b6065565b565b600060607f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090
565b3660008037600080366000845af43d6000803e8080156083573d6000f35b3d6000fdfea2646970667358221220ffbfbaa210c1b5f5ca62a5eba67b7d993e0bdf919f51500f790fb7acf2fd784c64736f6c63430008140033416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564", +} + +// Erc1967proxyABI is the input ABI used to generate the binding from. +// Deprecated: Use Erc1967proxyMetaData.ABI instead. +var Erc1967proxyABI = Erc1967proxyMetaData.ABI + +// Erc1967proxyBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use Erc1967proxyMetaData.Bin instead. +var Erc1967proxyBin = Erc1967proxyMetaData.Bin + +// DeployErc1967proxy deploys a new Ethereum contract, binding an instance of Erc1967proxy to it. +func DeployErc1967proxy(auth *bind.TransactOpts, backend bind.ContractBackend, _logic common.Address, _data []byte) (common.Address, *types.Transaction, *Erc1967proxy, error) { + parsed, err := Erc1967proxyMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(Erc1967proxyBin), backend, _logic, _data) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Erc1967proxy{Erc1967proxyCaller: Erc1967proxyCaller{contract: contract}, Erc1967proxyTransactor: Erc1967proxyTransactor{contract: contract}, Erc1967proxyFilterer: Erc1967proxyFilterer{contract: contract}}, nil +} + +// Erc1967proxy is an auto generated Go binding around an Ethereum contract. +type Erc1967proxy struct { + Erc1967proxyCaller // Read-only binding to the contract + Erc1967proxyTransactor // Write-only binding to the contract + Erc1967proxyFilterer // Log filterer for contract events +} + +// Erc1967proxyCaller is an auto generated read-only Go binding around an Ethereum contract. +type Erc1967proxyCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// Erc1967proxyTransactor is an auto generated write-only Go binding around an Ethereum contract. +type Erc1967proxyTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// Erc1967proxyFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type Erc1967proxyFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// Erc1967proxySession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type Erc1967proxySession struct { + Contract *Erc1967proxy // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// Erc1967proxyCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type Erc1967proxyCallerSession struct { + Contract *Erc1967proxyCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// Erc1967proxyTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. 
+type Erc1967proxyTransactorSession struct { + Contract *Erc1967proxyTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// Erc1967proxyRaw is an auto generated low-level Go binding around an Ethereum contract. +type Erc1967proxyRaw struct { + Contract *Erc1967proxy // Generic contract binding to access the raw methods on +} + +// Erc1967proxyCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type Erc1967proxyCallerRaw struct { + Contract *Erc1967proxyCaller // Generic read-only contract binding to access the raw methods on +} + +// Erc1967proxyTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type Erc1967proxyTransactorRaw struct { + Contract *Erc1967proxyTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewErc1967proxy creates a new instance of Erc1967proxy, bound to a specific deployed contract. +func NewErc1967proxy(address common.Address, backend bind.ContractBackend) (*Erc1967proxy, error) { + contract, err := bindErc1967proxy(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Erc1967proxy{Erc1967proxyCaller: Erc1967proxyCaller{contract: contract}, Erc1967proxyTransactor: Erc1967proxyTransactor{contract: contract}, Erc1967proxyFilterer: Erc1967proxyFilterer{contract: contract}}, nil +} + +// NewErc1967proxyCaller creates a new read-only instance of Erc1967proxy, bound to a specific deployed contract. +func NewErc1967proxyCaller(address common.Address, caller bind.ContractCaller) (*Erc1967proxyCaller, error) { + contract, err := bindErc1967proxy(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &Erc1967proxyCaller{contract: contract}, nil +} + +// NewErc1967proxyTransactor creates a new write-only instance of Erc1967proxy, bound to a specific deployed contract. +func NewErc1967proxyTransactor(address common.Address, transactor bind.ContractTransactor) (*Erc1967proxyTransactor, error) { + contract, err := bindErc1967proxy(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &Erc1967proxyTransactor{contract: contract}, nil +} + +// NewErc1967proxyFilterer creates a new log filterer instance of Erc1967proxy, bound to a specific deployed contract. +func NewErc1967proxyFilterer(address common.Address, filterer bind.ContractFilterer) (*Erc1967proxyFilterer, error) { + contract, err := bindErc1967proxy(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &Erc1967proxyFilterer{contract: contract}, nil +} + +// bindErc1967proxy binds a generic wrapper to an already deployed contract. +func bindErc1967proxy(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := Erc1967proxyMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. 
+func (_Erc1967proxy *Erc1967proxyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Erc1967proxy.Contract.Erc1967proxyCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Erc1967proxy *Erc1967proxyRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Erc1967proxy.Contract.Erc1967proxyTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Erc1967proxy *Erc1967proxyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Erc1967proxy.Contract.Erc1967proxyTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Erc1967proxy *Erc1967proxyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Erc1967proxy.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Erc1967proxy *Erc1967proxyTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Erc1967proxy.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Erc1967proxy *Erc1967proxyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Erc1967proxy.Contract.contract.Transact(opts, method, params...) +} + +// Fallback is a paid mutator transaction binding the contract fallback function. +// +// Solidity: fallback() payable returns() +func (_Erc1967proxy *Erc1967proxyTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { + return _Erc1967proxy.contract.RawTransact(opts, calldata) +} + +// Fallback is a paid mutator transaction binding the contract fallback function. +// +// Solidity: fallback() payable returns() +func (_Erc1967proxy *Erc1967proxySession) Fallback(calldata []byte) (*types.Transaction, error) { + return _Erc1967proxy.Contract.Fallback(&_Erc1967proxy.TransactOpts, calldata) +} + +// Fallback is a paid mutator transaction binding the contract fallback function. +// +// Solidity: fallback() payable returns() +func (_Erc1967proxy *Erc1967proxyTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { + return _Erc1967proxy.Contract.Fallback(&_Erc1967proxy.TransactOpts, calldata) +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_Erc1967proxy *Erc1967proxyTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Erc1967proxy.contract.RawTransact(opts, nil) // calldata is disallowed for receive function +} + +// Receive is a paid mutator transaction binding the contract receive function. 
+// +// Solidity: receive() payable returns() +func (_Erc1967proxy *Erc1967proxySession) Receive() (*types.Transaction, error) { + return _Erc1967proxy.Contract.Receive(&_Erc1967proxy.TransactOpts) +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_Erc1967proxy *Erc1967proxyTransactorSession) Receive() (*types.Transaction, error) { + return _Erc1967proxy.Contract.Receive(&_Erc1967proxy.TransactOpts) +} + +// Erc1967proxyAdminChangedIterator is returned from FilterAdminChanged and is used to iterate over the raw logs and unpacked data for AdminChanged events raised by the Erc1967proxy contract. +type Erc1967proxyAdminChangedIterator struct { + Event *Erc1967proxyAdminChanged // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *Erc1967proxyAdminChangedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(Erc1967proxyAdminChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(Erc1967proxyAdminChanged) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *Erc1967proxyAdminChangedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *Erc1967proxyAdminChangedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// Erc1967proxyAdminChanged represents a AdminChanged event raised by the Erc1967proxy contract. +type Erc1967proxyAdminChanged struct { + PreviousAdmin common.Address + NewAdmin common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterAdminChanged is a free log retrieval operation binding the contract event 0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f. 
+// +// Solidity: event AdminChanged(address previousAdmin, address newAdmin) +func (_Erc1967proxy *Erc1967proxyFilterer) FilterAdminChanged(opts *bind.FilterOpts) (*Erc1967proxyAdminChangedIterator, error) { + + logs, sub, err := _Erc1967proxy.contract.FilterLogs(opts, "AdminChanged") + if err != nil { + return nil, err + } + return &Erc1967proxyAdminChangedIterator{contract: _Erc1967proxy.contract, event: "AdminChanged", logs: logs, sub: sub}, nil +} + +// WatchAdminChanged is a free log subscription operation binding the contract event 0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f. +// +// Solidity: event AdminChanged(address previousAdmin, address newAdmin) +func (_Erc1967proxy *Erc1967proxyFilterer) WatchAdminChanged(opts *bind.WatchOpts, sink chan<- *Erc1967proxyAdminChanged) (event.Subscription, error) { + + logs, sub, err := _Erc1967proxy.contract.WatchLogs(opts, "AdminChanged") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(Erc1967proxyAdminChanged) + if err := _Erc1967proxy.contract.UnpackLog(event, "AdminChanged", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseAdminChanged is a log parse operation binding the contract event 0x7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f. +// +// Solidity: event AdminChanged(address previousAdmin, address newAdmin) +func (_Erc1967proxy *Erc1967proxyFilterer) ParseAdminChanged(log types.Log) (*Erc1967proxyAdminChanged, error) { + event := new(Erc1967proxyAdminChanged) + if err := _Erc1967proxy.contract.UnpackLog(event, "AdminChanged", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// Erc1967proxyBeaconUpgradedIterator is returned from FilterBeaconUpgraded and is used to iterate over the raw logs and unpacked data for BeaconUpgraded events raised by the Erc1967proxy contract. +type Erc1967proxyBeaconUpgradedIterator struct { + Event *Erc1967proxyBeaconUpgraded // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *Erc1967proxyBeaconUpgradedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(Erc1967proxyBeaconUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(Erc1967proxyBeaconUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *Erc1967proxyBeaconUpgradedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *Erc1967proxyBeaconUpgradedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// Erc1967proxyBeaconUpgraded represents a BeaconUpgraded event raised by the Erc1967proxy contract. +type Erc1967proxyBeaconUpgraded struct { + Beacon common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterBeaconUpgraded is a free log retrieval operation binding the contract event 0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e. +// +// Solidity: event BeaconUpgraded(address indexed beacon) +func (_Erc1967proxy *Erc1967proxyFilterer) FilterBeaconUpgraded(opts *bind.FilterOpts, beacon []common.Address) (*Erc1967proxyBeaconUpgradedIterator, error) { + + var beaconRule []interface{} + for _, beaconItem := range beacon { + beaconRule = append(beaconRule, beaconItem) + } + + logs, sub, err := _Erc1967proxy.contract.FilterLogs(opts, "BeaconUpgraded", beaconRule) + if err != nil { + return nil, err + } + return &Erc1967proxyBeaconUpgradedIterator{contract: _Erc1967proxy.contract, event: "BeaconUpgraded", logs: logs, sub: sub}, nil +} + +// WatchBeaconUpgraded is a free log subscription operation binding the contract event 0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e. 
+// +// Solidity: event BeaconUpgraded(address indexed beacon) +func (_Erc1967proxy *Erc1967proxyFilterer) WatchBeaconUpgraded(opts *bind.WatchOpts, sink chan<- *Erc1967proxyBeaconUpgraded, beacon []common.Address) (event.Subscription, error) { + + var beaconRule []interface{} + for _, beaconItem := range beacon { + beaconRule = append(beaconRule, beaconItem) + } + + logs, sub, err := _Erc1967proxy.contract.WatchLogs(opts, "BeaconUpgraded", beaconRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(Erc1967proxyBeaconUpgraded) + if err := _Erc1967proxy.contract.UnpackLog(event, "BeaconUpgraded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseBeaconUpgraded is a log parse operation binding the contract event 0x1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e. +// +// Solidity: event BeaconUpgraded(address indexed beacon) +func (_Erc1967proxy *Erc1967proxyFilterer) ParseBeaconUpgraded(log types.Log) (*Erc1967proxyBeaconUpgraded, error) { + event := new(Erc1967proxyBeaconUpgraded) + if err := _Erc1967proxy.contract.UnpackLog(event, "BeaconUpgraded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// Erc1967proxyUpgradedIterator is returned from FilterUpgraded and is used to iterate over the raw logs and unpacked data for Upgraded events raised by the Erc1967proxy contract. +type Erc1967proxyUpgradedIterator struct { + Event *Erc1967proxyUpgraded // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *Erc1967proxyUpgradedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(Erc1967proxyUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(Erc1967proxyUpgraded) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. 
+func (it *Erc1967proxyUpgradedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *Erc1967proxyUpgradedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// Erc1967proxyUpgraded represents a Upgraded event raised by the Erc1967proxy contract. +type Erc1967proxyUpgraded struct { + Implementation common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterUpgraded is a free log retrieval operation binding the contract event 0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b. +// +// Solidity: event Upgraded(address indexed implementation) +func (_Erc1967proxy *Erc1967proxyFilterer) FilterUpgraded(opts *bind.FilterOpts, implementation []common.Address) (*Erc1967proxyUpgradedIterator, error) { + + var implementationRule []interface{} + for _, implementationItem := range implementation { + implementationRule = append(implementationRule, implementationItem) + } + + logs, sub, err := _Erc1967proxy.contract.FilterLogs(opts, "Upgraded", implementationRule) + if err != nil { + return nil, err + } + return &Erc1967proxyUpgradedIterator{contract: _Erc1967proxy.contract, event: "Upgraded", logs: logs, sub: sub}, nil +} + +// WatchUpgraded is a free log subscription operation binding the contract event 0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b. +// +// Solidity: event Upgraded(address indexed implementation) +func (_Erc1967proxy *Erc1967proxyFilterer) WatchUpgraded(opts *bind.WatchOpts, sink chan<- *Erc1967proxyUpgraded, implementation []common.Address) (event.Subscription, error) { + + var implementationRule []interface{} + for _, implementationItem := range implementation { + implementationRule = append(implementationRule, implementationItem) + } + + logs, sub, err := _Erc1967proxy.contract.WatchLogs(opts, "Upgraded", implementationRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(Erc1967proxyUpgraded) + if err := _Erc1967proxy.contract.UnpackLog(event, "Upgraded", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseUpgraded is a log parse operation binding the contract event 0xbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b. 
+// +// Solidity: event Upgraded(address indexed implementation) +func (_Erc1967proxy *Erc1967proxyFilterer) ParseUpgraded(log types.Log) (*Erc1967proxyUpgraded, error) { + event := new(Erc1967proxyUpgraded) + if err := _Erc1967proxy.contract.UnpackLog(event, "Upgraded", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go new file mode 100644 index 00000000..39719028 --- /dev/null +++ b/tree/appendonlytree.go @@ -0,0 +1,243 @@ +package tree + +import ( + "context" + "fmt" + "math" + + dbCommon "github.com/0xPolygon/cdk/common" + "github.com/ethereum/go-ethereum/common" + "github.com/ledgerwatch/erigon-lib/kv" +) + +type AppendOnlyTree struct { + *Tree + lastLeftCache []common.Hash + lastIndex int64 +} + +func NewAppendOnly(ctx context.Context, dbPath, dbPrefix string) (*AppendOnlyTree, error) { + t, err := newTree(dbPath, dbPrefix) + if err != nil { + return nil, err + } + at := &AppendOnlyTree{Tree: t} + if err := at.initLastLeftCacheAndLastDepositCount(ctx); err != nil { + return nil, err + } + return at, nil +} + +// AddLeaves adds a list of leaves into the tree +func (t *AppendOnlyTree) AddLeaves(ctx context.Context, leaves []Leaf) error { + // Sanity check + if len(leaves) == 0 { + return nil + } + if int64(leaves[0].Index) != t.lastIndex+1 { + return fmt.Errorf( + "mismatched index. Expected: %d, actual: %d", + t.lastIndex+1, leaves[0].Index, + ) + } + tx, err := t.db.BeginRw(ctx) + if err != nil { + return err + } + backupIndx := t.lastIndex + backupCache := make([]common.Hash, len(t.lastLeftCache)) + copy(backupCache, t.lastLeftCache) + + for _, leaf := range leaves { + if err := t.addLeaf(tx, leaf); err != nil { + tx.Rollback() + t.lastIndex = backupIndx + t.lastLeftCache = backupCache + return err + } + } + + if err := tx.Commit(); err != nil { + t.lastIndex = backupIndx + t.lastLeftCache = backupCache + return err + } + return nil +} + +func (t *AppendOnlyTree) addLeaf(tx kv.RwTx, leaf Leaf) error { + // Calculate new tree nodes + currentChildHash := leaf.Hash + newNodes := []treeNode{} + for h := uint8(0); h < t.height; h++ { + var parent treeNode + if leaf.Index&(1<<h) > 0 { + // Add child to the right + parent = treeNode{ + left: t.lastLeftCache[h], + right: currentChildHash, + } + } else { + // Add child to the left + parent = treeNode{ + left: currentChildHash, + right: t.zeroHashes[h], + } + // Update cache + // TODO: review this part of the logic, skipping ?optimization? + // from OG implementation + t.lastLeftCache[h] = currentChildHash + } + currentChildHash = parent.hash() + newNodes = append(newNodes, parent) + } + + // store root + root := currentChildHash + if err := tx.Put(t.rootTable, dbCommon.Uint32ToBytes(leaf.Index), root[:]); err != nil { + return err + } + + // store nodes + for _, node := range newNodes { + value, err := node.MarshalBinary() + if err != nil { + return err + } + if err := tx.Put(t.rhtTable, node.hash().Bytes(), value); err != nil { + return err + } + } + + t.lastIndex++ + return nil +} + +func (t *AppendOnlyTree) initLastLeftCacheAndLastDepositCount(ctx context.Context) error { + tx, err := t.db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + root, err := t.initLastIndex(tx) + if err != nil { + return err + } + return t.initLastLeftCache(tx, t.lastIndex, root) +} + +// getLastIndexAndRoot returns the index and the root associated to the last leaf inserted.
+// If index == -1, it means no leaf added yet +func (t *AppendOnlyTree) getLastIndexAndRoot(tx kv.Tx) (int64, common.Hash, error) { + iter, err := tx.RangeDescend( + t.rootTable, + dbCommon.Uint32ToBytes(math.MaxUint32), + dbCommon.Uint32ToBytes(0), + 1, + ) + if err != nil { + return 0, common.Hash{}, err + } + + lastIndexBytes, rootBytes, err := iter.Next() + if err != nil { + return 0, common.Hash{}, err + } + if lastIndexBytes == nil { + return -1, common.Hash{}, nil + } + return int64(dbCommon.BytesToUint32(lastIndexBytes)), common.Hash(rootBytes), nil +} + +func (t *AppendOnlyTree) initLastIndex(tx kv.Tx) (common.Hash, error) { + ldc, root, err := t.getLastIndexAndRoot(tx) + if err != nil { + return common.Hash{}, err + } + t.lastIndex = ldc + return root, nil +} +func (t *AppendOnlyTree) initLastLeftCache(tx kv.Tx, lastIndex int64, lastRoot common.Hash) error { + siblings := make([]common.Hash, t.height, t.height) + if lastIndex == -1 { + t.lastLeftCache = siblings + return nil + } + index := lastIndex + + currentNodeHash := lastRoot + // It starts in height-1 because 0 is the level of the leafs + for h := int(t.height - 1); h >= 0; h-- { + currentNode, err := t.getRHTNode(tx, currentNodeHash) + if err != nil { + return fmt.Errorf( + "error getting node %s from the RHT at height %d with root %s: %v", + currentNodeHash.Hex(), h, lastRoot.Hex(), err, + ) + } + if currentNode == nil { + return ErrNotFound + } + siblings = append(siblings, currentNode.left) + if index&(1<<h) > 0 { + currentNodeHash = currentNode.right + } else { + currentNodeHash = currentNode.left + } + } + + // Reverse the siblings to go from leafs to root + for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { + siblings[i], siblings[j] = siblings[j], siblings[i] + } + + t.lastLeftCache = siblings + return nil +} + +// Reorg deletes all the data relevant from firstReorgedIndex (included) and onwards +// and prepares the tree for being used as it was at firstReorgedIndex-1 +func (t *AppendOnlyTree) Reorg(ctx context.Context, firstReorgedIndex uint32) error { + if t.lastIndex == -1 { + return nil + } + tx, err := t.db.BeginRw(ctx) + if err != nil { + return err + } + // Clean root table + for i := firstReorgedIndex; i <= uint32(t.lastIndex); i++ { + if err := tx.Delete(t.rootTable, dbCommon.Uint32ToBytes(i)); err != nil { + tx.Rollback() + return err + } + } + + // Reset + root := common.Hash{} + if firstReorgedIndex > 0 { + rootBytes, err := tx.GetOne(t.rootTable, dbCommon.Uint32ToBytes(firstReorgedIndex-1)) + if err != nil { + tx.Rollback() + return err + } + if rootBytes == nil { + tx.Rollback() + return ErrNotFound + } + root = common.Hash(rootBytes) + } + err = t.initLastLeftCache(tx, int64(firstReorgedIndex)-1, root) + if err != nil { + tx.Rollback() + return err + } + + // Note: not cleaning RHT, not worth it + if err := tx.Commit(); err != nil { + return err + } + t.lastIndex = int64(firstReorgedIndex) - 1 + return nil +} diff --git a/bridgesync/testvectors/claim-vectors.json b/tree/testvectors/claim-vectors.json similarity index 100% rename from bridgesync/testvectors/claim-vectors.json rename to tree/testvectors/claim-vectors.json diff --git a/bridgesync/testvectors/leaf-vectors.json b/tree/testvectors/leaf-vectors.json similarity index 100% rename from bridgesync/testvectors/leaf-vectors.json rename to tree/testvectors/leaf-vectors.json diff --git a/bridgesync/testvectors/root-vectors.json b/tree/testvectors/root-vectors.json similarity index 100% rename from bridgesync/testvectors/root-vectors.json rename
to tree/testvectors/root-vectors.json diff --git a/tree/testvectors/types.go b/tree/testvectors/types.go new file mode 100644 index 00000000..f005b6ea --- /dev/null +++ b/tree/testvectors/types.go @@ -0,0 +1,64 @@ +package testvectors + +import ( + "encoding/binary" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/iden3/go-iden3-crypto/keccak256" +) + +// DepositVectorRaw represents the deposit vector +type DepositVectorRaw struct { + OriginalNetwork uint32 `json:"originNetwork"` + TokenAddress string `json:"tokenAddress"` + Amount string `json:"amount"` + DestinationNetwork uint32 `json:"destinationNetwork"` + DestinationAddress string `json:"destinationAddress"` + ExpectedHash string `json:"leafValue"` + CurrentHash string `json:"currentLeafValue"` + Metadata string `json:"metadata"` +} + +func (d *DepositVectorRaw) Hash() common.Hash { + origNet := make([]byte, 4) //nolint:gomnd + binary.BigEndian.PutUint32(origNet, uint32(d.OriginalNetwork)) + destNet := make([]byte, 4) //nolint:gomnd + binary.BigEndian.PutUint32(destNet, uint32(d.DestinationNetwork)) + + metaHash := keccak256.Hash(common.FromHex(d.Metadata)) + hash := common.Hash{} + var buf [32]byte //nolint:gomnd + amount, _ := big.NewInt(0).SetString(d.Amount, 0) + origAddrBytes := common.HexToAddress(d.TokenAddress) + destAddrBytes := common.HexToAddress(d.DestinationAddress) + copy( + hash[:], + keccak256.Hash( + []byte{0}, + origNet, + origAddrBytes[:], + destNet, + destAddrBytes[:], + amount.FillBytes(buf[:]), + metaHash, + ), + ) + return hash +} + +// MTClaimVectorRaw represents the merkle proof +type MTClaimVectorRaw struct { + Deposits []DepositVectorRaw `json:"leafs"` + Index uint32 `json:"index"` + MerkleProof []string `json:"proof"` + ExpectedRoot string `json:"root"` +} + +// MTRootVectorRaw represents the root of Merkle Tree +type MTRootVectorRaw struct { + ExistingLeaves []string `json:"previousLeafsValues"` + CurrentRoot string `json:"currentRoot"` + NewLeaf DepositVectorRaw `json:"newLeaf"` + NewRoot string `json:"newRoot"` +} diff --git a/tree/tree.go b/tree/tree.go new file mode 100644 index 00000000..6271f16e --- /dev/null +++ b/tree/tree.go @@ -0,0 +1,175 @@ +package tree + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "golang.org/x/crypto/sha3" +) + +const ( + defaultHeight uint8 = 32 + rootTableSufix = "-root" + rhtTableSufix = "-rht" +) + +var ( + ErrNotFound = errors.New("not found") +) + +type Leaf struct { + Index uint32 + Hash common.Hash +} + +type Tree struct { + db kv.RwDB + rhtTable string + rootTable string + height uint8 + zeroHashes []common.Hash +} + +type treeNode struct { + left common.Hash + right common.Hash +} + +func (n *treeNode) hash() common.Hash { + var hash common.Hash + hasher := sha3.NewLegacyKeccak256() + hasher.Write(n.left[:]) + hasher.Write(n.right[:]) + copy(hash[:], hasher.Sum(nil)) + return hash +} + +func (n *treeNode) MarshalBinary() ([]byte, error) { + return append(n.left[:], n.right[:]...), nil +} + +func (n *treeNode) UnmarshalBinary(data []byte) error { + if len(data) != 64 { + return fmt.Errorf("expected len %d, actual len %d", 64, len(data)) + } + n.left = common.Hash(data[:32]) + n.right = common.Hash(data[32:]) + return nil +} + +func newTree(dbPath, dbPrefix string) (*Tree, error) { + rootTable := dbPrefix + rootTableSufix + rhtTable := dbPrefix + rhtTableSufix + db, err := mdbx.NewMDBX(nil). + Path(dbPath). 
+ WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg { + return kv.TableCfg{ + rootTable: {}, + rhtTable: {}, + } + }). + Open() + if err != nil { + return nil, err + } + t := &Tree{ + rhtTable: rhtTable, + rootTable: rootTable, + db: db, + height: defaultHeight, + zeroHashes: generateZeroHashes(defaultHeight), + } + + return t, nil +} + +// GetProof returns the merkle proof for a given index and root. +func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) ([]common.Hash, error) { + tx, err := t.db.BeginRw(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + siblings := make([]common.Hash, int(t.height)) + + currentNodeHash := root + // It starts in height-1 because 0 is the level of the leafs + for h := int(t.height - 1); h >= 0; h-- { + currentNode, err := t.getRHTNode(tx, currentNodeHash) + if err != nil { + return nil, fmt.Errorf( + "height: %d, currentNode: %s, error: %v", + h, currentNodeHash.Hex(), err, + ) + } + /* + * Root (level h=3 => height=4) + * / \ + * O5 O6 (level h=2) + * / \ / \ + * O1 O2 O3 O4 (level h=1) + * /\ /\ /\ /\ + * 0 1 2 3 4 5 6 7 Leafs (level h=0) + * Example 1: + * Choose index = 3 => 011 binary + * Assuming we are in level 1 => h=1; 1<<h = 010 binary => 011&010=010 which is higher than 0 so we need the left sibling (O1) + * Example 2: + * Choose index = 4 => 100 binary + * Assuming we are in level 1 => h=1; 1<<h = 010 binary => 100&010=000 which is not higher than 0 so we need the right sibling (O4) + * Example 3: + * Choose index = 4 => 100 binary + * Assuming we are in level 2 => h=2; 1<<h = 100 binary => 100&100=100 which is higher than 0 so we need the left sibling (O5) + */ + if index&(1<<h) > 0 { + siblings = append(siblings, currentNode.left) + currentNodeHash = currentNode.right + } else { + siblings = append(siblings, currentNode.right) + currentNodeHash = currentNode.left + } + } + + // Reverse siblings to go from leafs to root + for i, j := 0, len(siblings)-1; i < j; i, j = i+1, j-1 { + siblings[i], siblings[j] = siblings[j], siblings[i] + } + + return siblings, nil +} + +func (t *Tree) getRHTNode(tx kv.Tx, nodeHash common.Hash) (*treeNode, error) { + nodeBytes, err := tx.GetOne(t.rhtTable, nodeHash[:]) + if err != nil { + return nil, err + } + if nodeBytes == nil { + return nil, ErrNotFound + } + node := &treeNode{} + err = node.UnmarshalBinary(nodeBytes) + return node, err +} + +func generateZeroHashes(height uint8) []common.Hash { + var zeroHashes = []common.Hash{ + {}, + } + // This generates a leaf = HashZero in position 0. In the rest of the positions that are equivalent to the ascending levels, + // we set the hashes of the nodes. So all nodes from level i=5 will have the same value and same children nodes.
+ for i := 1; i <= int(height); i++ { + hasher := sha3.NewLegacyKeccak256() + hasher.Write(zeroHashes[i-1][:]) + hasher.Write(zeroHashes[i-1][:]) + thisHeightHash := common.Hash{} + copy(thisHeightHash[:], hasher.Sum(nil)) + zeroHashes = append(zeroHashes, thisHeightHash) + } + return zeroHashes +} diff --git a/tree/tree_test.go b/tree/tree_test.go new file mode 100644 index 00000000..47129aee --- /dev/null +++ b/tree/tree_test.go @@ -0,0 +1,103 @@ +package tree + +import ( + "context" + "encoding/json" + "fmt" + "os" + "testing" + + "github.com/0xPolygon/cdk/tree/testvectors" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestMTAddLeaf(t *testing.T) { + data, err := os.ReadFile("testvectors/root-vectors.json") + require.NoError(t, err) + + var mtTestVectors []testvectors.MTRootVectorRaw + err = json.Unmarshal(data, &mtTestVectors) + require.NoError(t, err) + ctx := context.Background() + + for ti, testVector := range mtTestVectors { + t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { + + path := t.TempDir() + tree, err := NewAppendOnly(context.Background(), path, "foo") + require.NoError(t, err) + + // Add existing leaves + leaves := []Leaf{} + for i, leaf := range testVector.ExistingLeaves { + leaves = append(leaves, Leaf{ + Index: uint32(i), + Hash: common.HexToHash(leaf), + }) + } + err = tree.AddLeaves(ctx, leaves) + require.NoError(t, err) + if len(testVector.ExistingLeaves) > 0 { + txRo, err := tree.db.BeginRo(ctx) + require.NoError(t, err) + _, actualRoot, err := tree.getLastIndexAndRoot(txRo) + txRo.Rollback() + require.NoError(t, err) + require.Equal(t, common.HexToHash(testVector.CurrentRoot), actualRoot) + } + + // Add new bridge + err = tree.AddLeaves(ctx, []Leaf{{ + Index: uint32(len(testVector.ExistingLeaves)), + Hash: common.HexToHash(testVector.NewLeaf.CurrentHash), + }}) + require.NoError(t, err) + txRo, err := tree.db.BeginRo(ctx) + require.NoError(t, err) + _, actualRoot, err := tree.getLastIndexAndRoot(txRo) + txRo.Rollback() + require.NoError(t, err) + require.Equal(t, common.HexToHash(testVector.NewRoot), actualRoot) + }) + } +} + +func TestMTGetProof(t *testing.T) { + data, err := os.ReadFile("testvectors/claim-vectors.json") + require.NoError(t, err) + + var mtTestVectors []testvectors.MTClaimVectorRaw + err = json.Unmarshal(data, &mtTestVectors) + require.NoError(t, err) + ctx := context.Background() + + for ti, testVector := range mtTestVectors { + t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { + path := t.TempDir() + tree, err := NewAppendOnly(context.Background(), path, "foo") + require.NoError(t, err) + leaves := []Leaf{} + for li, leaf := range testVector.Deposits { + leaves = append(leaves, Leaf{ + Index: uint32(li), + Hash: leaf.Hash(), + }) + } + err = tree.AddLeaves(ctx, leaves) + require.NoError(t, err) + txRo, err := tree.db.BeginRo(ctx) + require.NoError(t, err) + _, actualRoot, err := tree.getLastIndexAndRoot(txRo) + txRo.Rollback() + expectedRoot := common.HexToHash(testVector.ExpectedRoot) + require.Equal(t, expectedRoot, actualRoot) + + proof, err := tree.GetProof(ctx, testVector.Index, expectedRoot) + require.NoError(t, err) + for i, sibling := range testVector.MerkleProof { + require.Equal(t, common.HexToHash(sibling), proof[i]) + } + }) + } +} diff --git a/tree/updatabletree.go b/tree/updatabletree.go new file mode 100644 index 00000000..74812b26 --- /dev/null +++ b/tree/updatabletree.go @@ -0,0 +1,25 @@ +package tree + +import ( + "context" + "errors" +
"github.com/ethereum/go-ethereum/common" +) + +type UpdatableTree struct { + *Tree +} + +func NewUpdatable(ctx context.Context, dbPath, dbPrefix string) (*UpdatableTree, error) { + t, err := newTree(dbPath, dbPrefix) + if err != nil { + return nil, err + } + ut := &UpdatableTree{Tree: t} + return ut, nil +} + +func (t *UpdatableTree) UpsertLeaf(ctx context.Context, index uint32, leafHash common.Hash, expectedRoot *common.Hash) error { + return errors.New("not implemented") +}