From 34ad89dc3666edb3f349bf11aa3014aa9ccbd0fa Mon Sep 17 00:00:00 2001
From: Arnau
Date: Fri, 13 Sep 2024 15:30:44 +0200
Subject: [PATCH] address PR review requests

---
 bridgesync/migrations/migrations.go     | 26 ++++---------
 bridgesync/processor.go                 |  6 +--
 .../datacommittee/datacommittee_test.go |  3 +-
 db/interface.go                         | 12 +++++-
 db/migrations.go                        | 25 +++++++++++-
 db/tx.go                                | 23 ++++++++---
 db/types/types.go                       |  7 ++++
 l1infotreesync/e2e_test.go              | 30 ---------------
 l1infotreesync/migrations/migrations.go | 38 +++++++++----------
 l1infotreesync/processor.go             |  4 +-
 tree/appendonlytree.go                  |  4 +-
 tree/migrations/migrations.go           | 30 +++-------------
 tree/tree.go                            | 12 +++---
 tree/updatabletree.go                   |  2 +-
 14 files changed, 103 insertions(+), 119 deletions(-)
 create mode 100644 db/types/types.go

diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go
index a6ee27b3..c500ee38 100644
--- a/bridgesync/migrations/migrations.go
+++ b/bridgesync/migrations/migrations.go
@@ -2,32 +2,22 @@ package migrations
 
 import (
     _ "embed"
-    "strings"
 
     "github.com/0xPolygon/cdk/db"
+    "github.com/0xPolygon/cdk/db/types"
     treeMigrations "github.com/0xPolygon/cdk/tree/migrations"
-    migrate "github.com/rubenv/sql-migrate"
 )
 
-const upDownSeparator = "-- +migrate Up"
-
 //go:embed bridgesync0001.sql
 var mig001 string
 
-var mig001splitted = strings.Split(mig001, upDownSeparator)
-
-var bridgeMigrations = &migrate.MemoryMigrationSource{
-    Migrations: []*migrate.Migration{
+func RunMigrations(dbPath string) error {
+    migrations := []types.Migration{
         {
-            Id:   "bridgesync001",
-            Up:   []string{mig001splitted[1]},
-            Down: []string{mig001splitted[0]},
+            ID:  "bridgesync0001",
+            SQL: mig001,
         },
-    },
-}
-
-func RunMigrations(dbPath string) error {
-    return db.RunMigrations(dbPath, &migrate.MemoryMigrationSource{Migrations: append(
-        bridgeMigrations.Migrations,
-        treeMigrations.Migrations.Migrations...,
-    )})
+    }
+    migrations = append(migrations, treeMigrations.Migrations...)
+    return db.RunMigrations(dbPath, migrations)
 }
diff --git a/bridgesync/processor.go b/bridgesync/processor.go
index b34416a0..47b26595 100644
--- a/bridgesync/processor.go
+++ b/bridgesync/processor.go
@@ -174,7 +174,7 @@ func (p *processor) GetClaims(
     return claims, nil
 }
 
-func (p *processor) queryBlockRange(tx db.DBer, fromBlock, toBlock uint64, table string) (*sql.Rows, error) {
+func (p *processor) queryBlockRange(tx db.Querier, fromBlock, toBlock uint64, table string) (*sql.Rows, error) {
     if err := p.isBlockProcessed(tx, toBlock); err != nil {
         return nil, err
     }
@@ -191,7 +191,7 @@ func (p *processor) queryBlockRange(tx db.DBer, fromBlock, toBlock uint64, table
     return rows, nil
 }
 
-func (p *processor) isBlockProcessed(tx db.DBer, blockNum uint64) error {
+func (p *processor) isBlockProcessed(tx db.Querier, blockNum uint64) error {
     lpb, err := p.getLastProcessedBlockWithTx(tx)
     if err != nil {
         return err
     }
@@ -208,7 +208,7 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
     return p.getLastProcessedBlockWithTx(p.db)
 }
 
-func (p *processor) getLastProcessedBlockWithTx(tx db.DBer) (uint64, error) {
+func (p *processor) getLastProcessedBlockWithTx(tx db.Querier) (uint64, error) {
     var lastProcessedBlock uint64
     row := tx.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;")
     err := row.Scan(&lastProcessedBlock)
diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go
index ad128324..fcacef3c 100644
--- a/dataavailability/datacommittee/datacommittee_test.go
+++ b/dataavailability/datacommittee/datacommittee_test.go
@@ -2,7 +2,6 @@ package datacommittee
 
 import (
     "errors"
-    "fmt"
     "math/big"
     "testing"
 
@@ -164,7 +163,7 @@ func deployDACProxy(auth *bind.TransactOpts, client bind.ContractBackend, dacImp
     if err != nil {
         return common.Address{}, err
     }
-    fmt.Println("DAC proxy deployed at", proxyAddr)
+    log.Debugf("DAC proxy deployed at %s", proxyAddr)
     return proxyAddr, nil
 }
 
diff --git a/db/interface.go b/db/interface.go
index acf73ca3..03f81aba 100644
--- a/db/interface.go
+++ b/db/interface.go
@@ -1,9 +1,17 @@
 package db
 
-import "database/sql"
+import (
+    "context"
+    "database/sql"
+)
 
-type DBer interface {
+type Querier interface {
     Exec(query string, args ...interface{}) (sql.Result, error)
     Query(query string, args ...interface{}) (*sql.Rows, error)
     QueryRow(query string, args ...interface{}) *sql.Row
 }
+
+type DBer interface {
+    Querier
+    BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
+}
diff --git a/db/migrations.go b/db/migrations.go
index 07315375..1a56874e 100644
--- a/db/migrations.go
+++ b/db/migrations.go
@@ -2,22 +2,43 @@ package db
 
 import (
     "fmt"
+    "strings"
 
+    "github.com/0xPolygon/cdk/db/types"
     "github.com/0xPolygon/cdk/log"
     _ "github.com/mattn/go-sqlite3"
     migrate "github.com/rubenv/sql-migrate"
 )
 
+const (
+    upDownSeparator  = "-- +migrate Up"
+    dbPrefixReplacer = "/*dbprefix*/"
+)
+
 // RunMigrations will execute pending migrations if needed to keep
 // the database updated with the latest changes in either direction,
 // up or down.
-func RunMigrations(dbPath string, migrations migrate.MigrationSource) error {
+func RunMigrations(dbPath string, migrations []types.Migration) error {
     db, err := NewSQLiteDB(dbPath)
     if err != nil {
         return fmt.Errorf("error creating DB %w", err)
     }
+    migs := &migrate.MemoryMigrationSource{Migrations: []*migrate.Migration{}}
+    for _, m := range migrations {
+        prefixed := strings.ReplaceAll(m.SQL, dbPrefixReplacer, m.Prefix)
+        splitted := strings.Split(prefixed, upDownSeparator)
+        migs.Migrations = append(migs.Migrations, &migrate.Migration{
+            Id:   m.Prefix + m.ID,
+            Up:   []string{splitted[1]},
+            Down: []string{splitted[0]},
+        })
+    }
 
-    nMigrations, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+    log.Debugf("running migrations:")
+    for _, m := range migs.Migrations {
+        log.Debugf("%+v", m.Id)
+    }
+    nMigrations, err := migrate.Exec(db, "sqlite3", migs, migrate.Up)
     if err != nil {
         return fmt.Errorf("error executing migration %w", err)
     }
diff --git a/db/tx.go b/db/tx.go
index dc22750e..926da07c 100644
--- a/db/tx.go
+++ b/db/tx.go
@@ -2,22 +2,33 @@ package db
 
 import (
     "context"
-    "database/sql"
 )
 
+type SQLTxer interface {
+    Querier
+    Commit() error
+    Rollback() error
+}
+
+type Txer interface {
+    SQLTxer
+    AddRollbackCallback(cb func())
+    AddCommitCallback(cb func())
+}
+
 type Tx struct {
-    *sql.Tx
+    SQLTxer
     rollbackCallbacks []func()
     commitCallbacks   []func()
 }
 
-func NewTx(ctx context.Context, db *sql.DB) (*Tx, error) {
+func NewTx(ctx context.Context, db DBer) (Txer, error) {
     tx, err := db.BeginTx(ctx, nil)
     if err != nil {
         return nil, err
     }
     return &Tx{
-        Tx: tx,
+        SQLTxer: tx,
     }, nil
 }
 
@@ -29,7 +40,7 @@ func (s *Tx) AddCommitCallback(cb func()) {
 }
 
 func (s *Tx) Commit() error {
-    if err := s.Tx.Commit(); err != nil {
+    if err := s.SQLTxer.Commit(); err != nil {
         return err
     }
     for _, cb := range s.commitCallbacks {
@@ -39,7 +50,7 @@ func (s *Tx) Rollback() error {
-    if err := s.Tx.Rollback(); err != nil {
+    if err := s.SQLTxer.Rollback(); err != nil {
         return err
     }
     for _, cb := range s.rollbackCallbacks {
diff --git a/db/types/types.go b/db/types/types.go
new file mode 100644
index 00000000..ade19092
--- /dev/null
+++ b/db/types/types.go
@@ -0,0 +1,7 @@
+package types
+
+type Migration struct {
+    ID     string
+    SQL    string
+    Prefix string
+}
diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go
index 989e347e..90f7f091 100644
--- a/l1infotreesync/e2e_test.go
+++ b/l1infotreesync/e2e_test.go
@@ -135,36 +135,6 @@ func TestE2E(t *testing.T) {
     }
 }
 
-func TestFinalised(t *testing.T) {
-    ctx := context.Background()
-    privateKey, err := crypto.GenerateKey()
-    require.NoError(t, err)
-    auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(1337))
-    require.NoError(t, err)
-    client, _, _, _, _, err := newSimulatedClient(auth) //nolint:dogsled
-    require.NoError(t, err)
-    for i := 0; i < 100; i++ {
-        client.Commit()
-    }
-
-    n4, err := client.Client().HeaderByNumber(ctx, big.NewInt(-4))
-    require.NoError(t, err)
-    fmt.Println("-4", n4.Number)
-    n3, err := client.Client().HeaderByNumber(ctx, big.NewInt(-3))
-    require.NoError(t, err)
-    fmt.Println("-3", n3.Number)
-    n2, err := client.Client().HeaderByNumber(ctx, big.NewInt(-2))
-    require.NoError(t, err)
-    fmt.Println("-2", n2.Number)
-    n1, err := client.Client().HeaderByNumber(ctx, big.NewInt(-1))
-    require.NoError(t, err)
-    fmt.Println("-1", n1.Number)
-    n0, err := client.Client().HeaderByNumber(ctx, nil)
-    require.NoError(t, err)
-    fmt.Println("0", n0.Number)
-    fmt.Printf("amount of blocks latest - finalised: %d", n0.Number.Uint64()-n3.Number.Uint64())
-}
-
 func TestStressAndReorgs(t *testing.T) {
     const (
         totalIterations = 200 // Have tested with much larger number (+10k)
diff --git a/l1infotreesync/migrations/migrations.go b/l1infotreesync/migrations/migrations.go
index e8861573..768dde37 100644
--- a/l1infotreesync/migrations/migrations.go
+++ b/l1infotreesync/migrations/migrations.go
@@ -2,40 +2,38 @@ package migrations
 
 import (
     _ "embed"
-    "strings"
 
     "github.com/0xPolygon/cdk/db"
-    "github.com/0xPolygon/cdk/log"
+    "github.com/0xPolygon/cdk/db/types"
     treeMigrations "github.com/0xPolygon/cdk/tree/migrations"
-    migrate "github.com/rubenv/sql-migrate"
 )
 
 const (
-    upDownSeparator      = "-- +migrate Up"
     RollupExitTreePrefix = "rollup_exit_"
     L1InfoTreePrefix     = "l1_info_"
 )
 
 //go:embed l1infotreesync0001.sql
 var mig001 string
 
-var mig001splitted = strings.Split(mig001, upDownSeparator)
-
-var Migrations = &migrate.MemoryMigrationSource{
-    Migrations: []*migrate.Migration{
+func RunMigrations(dbPath string) error {
+    migrations := []types.Migration{
         {
-            Id:   "l1infotreesync0001",
-            Up:   []string{mig001splitted[1]},
-            Down: []string{mig001splitted[0]},
+            ID:  "l1infotreesync0001",
+            SQL: mig001,
         },
-    },
-}
-
-func RunMigrations(dbPath string) error {
-    migs := treeMigrations.MigrationsWithPrefix(RollupExitTreePrefix)
-    migs = append(migs, treeMigrations.MigrationsWithPrefix(L1InfoTreePrefix)...)
-    migs = append(migs, Migrations.Migrations...)
-    for _, m := range migs {
-        log.Debugf("%+v", m.Id)
     }
-    return db.RunMigrations(dbPath, &migrate.MemoryMigrationSource{Migrations: migs})
+    for _, tm := range treeMigrations.Migrations {
+        migrations = append(migrations, types.Migration{
+            ID:     tm.ID,
+            SQL:    tm.SQL,
+            Prefix: RollupExitTreePrefix,
+        })
+        migrations = append(migrations, types.Migration{
+            ID:     tm.ID,
+            SQL:    tm.SQL,
+            Prefix: L1InfoTreePrefix,
+        })
+    }
+    return db.RunMigrations(dbPath, migrations)
 }
diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go
index b6292245..781d4478 100644
--- a/l1infotreesync/processor.go
+++ b/l1infotreesync/processor.go
@@ -178,7 +178,7 @@ func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) {
     return p.getLastProcessedBlockWithTx(p.db)
 }
 
-func (p *processor) getLastProcessedBlockWithTx(tx db.DBer) (uint64, error) {
+func (p *processor) getLastProcessedBlockWithTx(tx db.Querier) (uint64, error) {
     var lastProcessedBlock uint64
     row := tx.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;")
     err := row.Scan(&lastProcessedBlock)
@@ -309,7 +309,7 @@ func (p *processor) ProcessBlock(ctx context.Context, b sync.Block) error {
     return nil
 }
 
-func (p *processor) getLastIndex(tx db.DBer) (uint32, error) {
+func (p *processor) getLastIndex(tx db.Querier) (uint32, error) {
     var lastProcessedIndex uint32
     row := tx.QueryRow("SELECT position FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;")
     err := row.Scan(&lastProcessedIndex)
diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go
index d527a819..20d22ec1 100644
--- a/tree/appendonlytree.go
+++ b/tree/appendonlytree.go
@@ -28,7 +28,7 @@ func NewAppendOnlyTree(db *sql.DB, dbPrefix string) *AppendOnlyTree {
     }
 }
 
-func (t *AppendOnlyTree) AddLeaf(tx *db.Tx, blockNum, blockPosition uint64, leaf types.Leaf) error {
+func (t *AppendOnlyTree) AddLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error {
     if int64(leaf.Index) != t.lastIndex+1 {
         // rebuild cache
         if err := t.initCache(tx); err != nil {
@@ -78,7 +78,7 @@ func (t *AppendOnlyTree) AddLeaf(tx *db.Tx, blockNum, blockPosition uint64, leaf
     return nil
 }
 
-func (t *AppendOnlyTree) initCache(tx *db.Tx) error {
+func (t *AppendOnlyTree) initCache(tx db.Txer) error {
     siblings := [types.DefaultHeight]common.Hash{}
     lastRoot, err := t.getLastRootWithTx(tx)
     if err != nil {
diff --git a/tree/migrations/migrations.go b/tree/migrations/migrations.go
index 63fa6a8c..dd5847e7 100644
--- a/tree/migrations/migrations.go
+++ b/tree/migrations/migrations.go
@@ -2,41 +2,21 @@ package migrations
 
 import (
     _ "embed"
-    "strings"
 
     "github.com/0xPolygon/cdk/db"
-    migrate "github.com/rubenv/sql-migrate"
-)
-
-const (
-    upDownSeparator  = "-- +migrate Up"
-    dbPrefixReplacer = "/*dbprefix*/"
+    "github.com/0xPolygon/cdk/db/types"
 )
 
 //go:embed tree0001.sql
 var mig001 string
 
-var mig001splitted = strings.Split(mig001, upDownSeparator)
-
-var Migrations = &migrate.MemoryMigrationSource{
-    Migrations: []*migrate.Migration{
-        {
-            Id:   "tree001",
-            Up:   []string{mig001splitted[1]},
-            Down: []string{mig001splitted[0]},
-        },
+var Migrations = []types.Migration{
+    {
+        ID:  "tree001",
+        SQL: mig001,
     },
 }
 
 func RunMigrations(dbPath string) error {
     return db.RunMigrations(dbPath, Migrations)
 }
-
-func MigrationsWithPrefix(prefix string) []*migrate.Migration {
-    return []*migrate.Migration{
-        {
-            Id:   prefix + "tree001",
-            Up:   []string{strings.ReplaceAll(mig001splitted[1], dbPrefixReplacer, prefix)},
-            Down: []string{strings.ReplaceAll(mig001splitted[0], dbPrefixReplacer, prefix)},
-        },
-    }
-}
diff --git a/tree/tree.go b/tree/tree.go
index 3a75e844..2107ba68 100644
--- a/tree/tree.go
+++ b/tree/tree.go
@@ -49,7 +49,7 @@ func newTree(db *sql.DB, tablePrefix string) *Tree {
     return t
 }
 
-func (t *Tree) getSiblings(tx db.DBer, index uint32, root common.Hash) (
+func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) (
     siblings [32]common.Hash,
     hasUsedZeroHashes bool,
     err error,
@@ -118,7 +118,7 @@ func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (ty
     return siblings, nil
 }
 
-func (t *Tree) getRHTNode(tx db.DBer, nodeHash common.Hash) (*types.TreeNode, error) {
+func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, error) {
     node := &types.TreeNode{}
     err := meddler.QueryRow(
         tx, node,
@@ -152,7 +152,7 @@ func generateZeroHashes(height uint8) []common.Hash {
     return zeroHashes
 }
 
-func (t *Tree) storeNodes(tx *db.Tx, nodes []types.TreeNode) error {
+func (t *Tree) storeNodes(tx db.Txer, nodes []types.TreeNode) error {
     for i := 0; i < len(nodes); i++ {
         if err := meddler.Insert(tx, t.rhtTable, &nodes[i]); err != nil {
             if sqliteErr, ok := db.SQLiteErr(err); ok {
@@ -168,7 +168,7 @@ func (t *Tree) storeNodes(tx *db.Tx, nodes []types.TreeNode) error {
     return nil
 }
 
-func (t *Tree) storeRoot(tx *db.Tx, root types.Root) error {
+func (t *Tree) storeRoot(tx db.Txer, root types.Root) error {
     return meddler.Insert(tx, t.rootTable, &root)
 }
 
@@ -177,7 +177,7 @@ func (t *Tree) GetLastRoot(ctx context.Context) (types.Root, error) {
     return t.getLastRootWithTx(t.db)
 }
 
-func (t *Tree) getLastRootWithTx(tx db.DBer) (types.Root, error) {
+func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) {
     var root types.Root
     err := meddler.QueryRow(
         tx, &root,
@@ -242,7 +242,7 @@ func (t *Tree) GetLeaf(ctx context.Context, index uint32, root common.Hash) (com
 }
 
 // Reorg deletes all the data relevant from firstReorgedBlock (includded) and onwards
-func (t *Tree) Reorg(tx *db.Tx, firstReorgedBlock uint64) error {
+func (t *Tree) Reorg(tx db.Txer, firstReorgedBlock uint64) error {
     _, err := tx.Exec(
         fmt.Sprintf(`DELETE FROM %s WHERE block_num >= $1`, t.rootTable),
         firstReorgedBlock,
diff --git a/tree/updatabletree.go b/tree/updatabletree.go
index 4e9f753f..3ed8b881 100644
--- a/tree/updatabletree.go
+++ b/tree/updatabletree.go
@@ -23,7 +23,7 @@ func NewUpdatableTree(db *sql.DB, dbPrefix string) *UpdatableTree {
     return ut
 }
 
-func (t *UpdatableTree) UpsertLeaf(tx *db.Tx, blockNum, blockPosition uint64, leaf types.Leaf) error {
+func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error {
     var rootHash common.Hash
     root, err := t.getLastRootWithTx(tx)
     if err != nil {
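
Note: below is a minimal usage sketch of the APIs this patch introduces (db/types.Migration, db.RunMigrations, db.NewTx and the Txer callbacks). It assumes the interfaces land exactly as in the diff above; the package name, the embedded example0001.sql file, the "example_" prefix and the example_block table are illustrative only and do not exist in the repository.

package examplesync

import (
    "context"
    _ "embed"

    "github.com/0xPolygon/cdk/db"
    "github.com/0xPolygon/cdk/db/types"
)

// example0001.sql is a hypothetical migration file. Like the real ones, it
// holds the Down statements first, then the "-- +migrate Up" separator, then
// the Up statements, and may reference /*dbprefix*/ in object names.
//
//go:embed example0001.sql
var mig001 string

// RunMigrations registers the embedded migration under an "example_" prefix;
// db.RunMigrations substitutes /*dbprefix*/, splits the SQL into Up/Down at
// the separator and applies whatever is pending.
func RunMigrations(dbPath string) error {
    migrations := []types.Migration{
        {ID: "examplesync0001", SQL: mig001, Prefix: "example_"},
    }
    return db.RunMigrations(dbPath, migrations)
}

// insertBlock shows the Txer callback hooks: functions registered on the
// wrapped transaction only fire after the underlying Commit or Rollback
// succeeds.
func insertBlock(ctx context.Context, database db.DBer, num uint64) error {
    tx, err := db.NewTx(ctx, database)
    if err != nil {
        return err
    }
    tx.AddRollbackCallback(func() { /* e.g. drop in-memory caches */ })
    tx.AddCommitCallback(func() { /* e.g. notify subscribers */ })

    if _, err := tx.Exec(`INSERT INTO example_block (num) VALUES ($1)`, num); err != nil {
        if errRllbck := tx.Rollback(); errRllbck != nil {
            return errRllbck
        }
        return err
    }
    return tx.Commit()
}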