From 4d5212b6c4c09aa7db907120ba6106bd27299303 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Thu, 12 Jun 2025 10:32:33 -0700 Subject: [PATCH 01/27] Implement AcquireAdvisoryLock and ReleaseAdvisoryLock. --- internal/db/utils.go | 28 +++++++++++++++ internal/db/utils_test.go | 75 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) create mode 100644 internal/db/utils.go create mode 100644 internal/db/utils_test.go diff --git a/internal/db/utils.go b/internal/db/utils.go new file mode 100644 index 00000000..a41aacc8 --- /dev/null +++ b/internal/db/utils.go @@ -0,0 +1,28 @@ +package db + +import ( + "context" + "fmt" +) + +// AcquireAdvisoryLock attempt to acquire an advisory lock on the provided lockKey, returns true if acquired, or false +// not. +func AcquireAdvisoryLock(ctx context.Context, dbConnectionPool ConnectionPool, lockKey int) (bool, error) { + tssAdvisoryLockAcquired := false + sqlQuery := "SELECT pg_try_advisory_lock($1)" + err := dbConnectionPool.QueryRowxContext(ctx, sqlQuery, lockKey).Scan(&tssAdvisoryLockAcquired) + if err != nil { + return false, fmt.Errorf("querying pg_try_advisory_lock(%v): %w", lockKey, err) + } + return tssAdvisoryLockAcquired, nil +} + +// ReleaseAdvisoryLock releases an advisory lock on the provided lockKey. +func ReleaseAdvisoryLock(ctx context.Context, dbConnectionPool ConnectionPool, lockKey int) error { + sqlQuery := "SELECT pg_advisory_unlock($1)" + _, err := dbConnectionPool.ExecContext(ctx, sqlQuery, lockKey) + if err != nil { + return fmt.Errorf("executing pg_advisory_unlock(%v): %w", lockKey, err) + } + return nil +} diff --git a/internal/db/utils_test.go b/internal/db/utils_test.go new file mode 100644 index 00000000..bbe6cc16 --- /dev/null +++ b/internal/db/utils_test.go @@ -0,0 +1,75 @@ +package db + +import ( + "context" + "crypto/rand" + "math/big" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stellar/wallet-backend/internal/db/dbtest" +) + +func TestAdvisoryLockAndRelease(t *testing.T) { + ctx := context.Background() + // Creates a test database: + dbt := dbtest.OpenWithoutMigrations(t) + defer dbt.Close() + + // Creates a database pool + randBigInt, err := rand.Int(rand.Reader, big.NewInt(90000)) + require.NoError(t, err) + lockKey := int(randBigInt.Int64()) + + t.Run("lock_acquired_can_be_released_on_dbConnClose", func(t *testing.T) { + dbConnectionPool1, err := OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + lockAcquired, err := AcquireAdvisoryLock(ctx, dbConnectionPool1, lockKey) + require.NoError(t, err) + require.True(t, lockAcquired, "should be able to acquire the lock") + + // Create another database pool + dbConnectionPool2, err := OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool2.Close() + lockAcquired2, err := AcquireAdvisoryLock(ctx, dbConnectionPool2, lockKey) + require.NoError(t, err) + require.False(t, lockAcquired2, "should not be able to acquire the lock since its already been acquired by dbConnectionPool1") + + // Close the original connection which releases the lock + err = dbConnectionPool1.Close() + require.NoError(t, err) + + // try to acquire the lock again + lockAcquired2, err = AcquireAdvisoryLock(ctx, dbConnectionPool2, lockKey) + require.NoError(t, err) + require.True(t, lockAcquired2, "should be able to acquire the lock since we called dbConnectionPool1.Close()") + }) + + t.Run("lock_acquired_can_be_released_on_ReleaseAdvisoryLock", func(t *testing.T) { + dbConnectionPool1, err := OpenDBConnectionPool(dbt.DSN) + 
require.NoError(t, err) + defer dbConnectionPool1.Close() + lockAcquired, err := AcquireAdvisoryLock(ctx, dbConnectionPool1, lockKey) + require.NoError(t, err) + require.True(t, lockAcquired, "should be able to acquire the lock") + + // Create another database pool + dbConnectionPool2, err := OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool2.Close() + lockAcquired2, err := AcquireAdvisoryLock(ctx, dbConnectionPool2, lockKey) + require.NoError(t, err) + require.False(t, lockAcquired2, "should not be able to acquire the lock since its already been acquired by dbConnectionPool1") + + // Release the lock + err = ReleaseAdvisoryLock(ctx, dbConnectionPool1, lockKey) + require.NoError(t, err) + + // try to acquire the lock again + lockAcquired2, err = AcquireAdvisoryLock(ctx, dbConnectionPool2, lockKey) + require.NoError(t, err) + require.True(t, lockAcquired2, "should be able to acquire the lock since we called ReleaseAdvisoryLock") + }) +} From 715e8dce9d49a9e061496b6d38ab7584d93402aa Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Thu, 12 Jun 2025 14:12:09 -0700 Subject: [PATCH 02/27] Enforce advisory lock on the ingest service --- internal/services/ingest.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 7c47aafc..d3f9dfbb 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -20,6 +20,8 @@ import ( "github.com/stellar/wallet-backend/internal/utils" ) +const advisoryLockID = int(3747555612780983) + const ( ingestHealthCheckMaxWaitTime = 90 * time.Second paymentPrometheusLabel = "payment" @@ -81,6 +83,19 @@ func NewIngestService( } func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger uint32) error { + // Acquire advisory lock to prevent multiple ingestion instances from running concurrently + if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.Payments.DB, advisoryLockID); err != nil { + return fmt.Errorf("acquiring advisory lock: %w", err) + } else if !lockAcquired { + return fmt.Errorf("advisory lock not acquired") + } + defer func() { + if err := db.ReleaseAdvisoryLock(ctx, m.models.Payments.DB, advisoryLockID); err != nil { + err = fmt.Errorf("releasing advisory lock: %w", err) + log.Ctx(ctx).Error(err) + } + }() + manualTriggerChannel := make(chan any, 1) go m.rpcService.TrackRPCServiceHealth(ctx, manualTriggerChannel) ingestHeartbeatChannel := make(chan any, 1) From d91fe9fb0760f793b7bbda6e1696fcaf60c1526f Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Thu, 12 Jun 2025 14:59:44 -0700 Subject: [PATCH 03/27] Create a Model object to execute queries pertaining to the ingest_store database table. It was being executed by the PaymentsModel. 
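
For reviewers, a minimal usage sketch (not part of this patch) of how the new IngestStoreModel is expected to serve as the ingestion cursor; the wrapper function `advanceCursor` and the cursor name used here are hypothetical:

```go
package example

import (
	"context"
	"fmt"

	"github.com/stellar/wallet-backend/internal/data"
)

// advanceCursor reads the cursor (GetLatestLedgerSynced returns 0 on the first
// run, when the key does not exist yet) and then upserts the new value. The
// INSERT ... ON CONFLICT (key) DO UPDATE query means the same call works for
// both the first write and every later update of the same key.
func advanceCursor(ctx context.Context, models *data.Models, ledger uint32) error {
	const cursorName = "live_ingest_cursor" // hypothetical cursor name
	lastSynced, err := models.IngestStore.GetLatestLedgerSynced(ctx, cursorName)
	if err != nil {
		return fmt.Errorf("reading ingestion cursor: %w", err)
	}
	if ledger <= lastSynced {
		return nil // nothing new to record
	}
	if err := models.IngestStore.UpdateLatestLedgerSynced(ctx, cursorName, ledger); err != nil {
		return fmt.Errorf("advancing ingestion cursor: %w", err)
	}
	return nil
}
```
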
--- internal/data/ingest_store.go | 52 +++++++++++ internal/data/ingest_store_test.go | 133 +++++++++++++++++++++++++++++ internal/data/models.go | 10 ++- internal/data/payments.go | 36 -------- internal/data/payments_test.go | 59 ------------- internal/services/ingest.go | 4 +- internal/services/ingest_test.go | 4 +- 7 files changed, 195 insertions(+), 103 deletions(-) create mode 100644 internal/data/ingest_store.go create mode 100644 internal/data/ingest_store_test.go diff --git a/internal/data/ingest_store.go b/internal/data/ingest_store.go new file mode 100644 index 00000000..3e89e049 --- /dev/null +++ b/internal/data/ingest_store.go @@ -0,0 +1,52 @@ +package data + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/stellar/wallet-backend/internal/db" + "github.com/stellar/wallet-backend/internal/metrics" +) + +type IngestStoreModel struct { + DB db.ConnectionPool + MetricsService metrics.MetricsService +} + +func (m *IngestStoreModel) GetLatestLedgerSynced(ctx context.Context, cursorName string) (uint32, error) { + var lastSyncedLedger uint32 + start := time.Now() + err := m.DB.GetContext(ctx, &lastSyncedLedger, `SELECT value FROM ingest_store WHERE key = $1`, cursorName) + duration := time.Since(start).Seconds() + m.MetricsService.ObserveDBQueryDuration("SELECT", "ingest_store", duration) + m.MetricsService.IncDBQuery("SELECT", "ingest_store") + // First run, key does not exist yet + if errors.Is(err, sql.ErrNoRows) { + return 0, nil + } + if err != nil { + return 0, fmt.Errorf("getting latest ledger synced for cursor %s: %w", cursorName, err) + } + + return lastSyncedLedger, nil +} + +func (m *IngestStoreModel) UpdateLatestLedgerSynced(ctx context.Context, cursorName string, ledger uint32) error { + const query = ` + INSERT INTO ingest_store (key, value) VALUES ($1, $2) + ON CONFLICT (key) DO UPDATE SET value = excluded.value + ` + start := time.Now() + _, err := m.DB.ExecContext(ctx, query, cursorName, ledger) + duration := time.Since(start).Seconds() + m.MetricsService.ObserveDBQueryDuration("INSERT", "ingest_store", duration) + if err != nil { + return fmt.Errorf("updating last synced ledger to %d: %w", ledger, err) + } + m.MetricsService.IncDBQuery("INSERT", "ingest_store") + + return nil +} diff --git a/internal/data/ingest_store_test.go b/internal/data/ingest_store_test.go new file mode 100644 index 00000000..6293b31e --- /dev/null +++ b/internal/data/ingest_store_test.go @@ -0,0 +1,133 @@ +package data + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/stellar/wallet-backend/internal/db" + "github.com/stellar/wallet-backend/internal/db/dbtest" + "github.com/stellar/wallet-backend/internal/metrics" +) + +func Test_IngestStoreModel_GetLatestLedgerSynced(t *testing.T) { + dbt := dbtest.Open(t) + defer dbt.Close() + dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool.Close() + + ctx := context.Background() + + testCases := []struct { + name string + key string + setupDB func(t *testing.T) + expectedLedger uint32 + }{ + { + name: "returns_0_if_key_does_not_exist", + key: "ingest_store_key", + expectedLedger: 0, + }, + { + name: "returns_value_if_key_exists", + key: "ingest_store_key", + setupDB: func(t *testing.T) { + _, err := dbConnectionPool.ExecContext(ctx, `INSERT INTO ingest_store (key, value) VALUES ($1, $2)`, "ingest_store_key", 123) + require.NoError(t, err) + }, + 
expectedLedger: 123, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := dbConnectionPool.ExecContext(ctx, "DELETE FROM ingest_store") + require.NoError(t, err) + + mockMetricsService := metrics.NewMockMetricsService() + mockMetricsService. + On("ObserveDBQueryDuration", "SELECT", "ingest_store", mock.Anything).Return(). + On("IncDBQuery", "SELECT", "ingest_store").Return() + defer mockMetricsService.AssertExpectations(t) + + m := &IngestStoreModel{ + DB: dbConnectionPool, + MetricsService: mockMetricsService, + } + if tc.setupDB != nil { + tc.setupDB(t) + } + + lastSyncedLedger, err := m.GetLatestLedgerSynced(ctx, tc.key) + require.NoError(t, err) + assert.Equal(t, tc.expectedLedger, lastSyncedLedger) + }) + } +} + +func Test_IngestStoreModel_UpdateLatestLedgerSynced(t *testing.T) { + dbt := dbtest.Open(t) + defer dbt.Close() + dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool.Close() + + ctx := context.Background() + + testCases := []struct { + name string + key string + ledgerToUpsert uint32 + setupDB func(t *testing.T) + }{ + { + name: "inserts_if_key_does_not_exist", + key: "ingest_store_key", + ledgerToUpsert: 123, + }, + { + name: "updates_if_key_exists", + key: "ingest_store_key", + setupDB: func(t *testing.T) { + _, err := dbConnectionPool.ExecContext(ctx, `INSERT INTO ingest_store (key, value) VALUES ($1, $2)`, "ingest_store_key", 123) + require.NoError(t, err) + }, + ledgerToUpsert: 456, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := dbConnectionPool.ExecContext(ctx, "DELETE FROM ingest_store") + require.NoError(t, err) + + mockMetricsService := metrics.NewMockMetricsService() + mockMetricsService. + On("ObserveDBQueryDuration", "INSERT", "ingest_store", mock.Anything).Return().Once(). 
+ On("IncDBQuery", "INSERT", "ingest_store").Return().Once() + defer mockMetricsService.AssertExpectations(t) + + m := &IngestStoreModel{ + DB: dbConnectionPool, + MetricsService: mockMetricsService, + } + + if tc.setupDB != nil { + tc.setupDB(t) + } + + err = m.UpdateLatestLedgerSynced(ctx, tc.key, tc.ledgerToUpsert) + require.NoError(t, err) + + var dbStoredLedger uint32 + err = m.DB.GetContext(ctx, &dbStoredLedger, `SELECT value FROM ingest_store WHERE key = $1`, tc.key) + require.NoError(t, err) + assert.Equal(t, tc.ledgerToUpsert, dbStoredLedger) + }) + } +} diff --git a/internal/data/models.go b/internal/data/models.go index 3457f713..f5d574e3 100644 --- a/internal/data/models.go +++ b/internal/data/models.go @@ -8,8 +8,9 @@ import ( ) type Models struct { - Payments *PaymentModel - Account *AccountModel + Account *AccountModel + IngestStore *IngestStoreModel + Payments *PaymentModel } func NewModels(db db.ConnectionPool, metricsService metrics.MetricsService) (*Models, error) { @@ -18,7 +19,8 @@ func NewModels(db db.ConnectionPool, metricsService metrics.MetricsService) (*Mo } return &Models{ - Payments: &PaymentModel{DB: db, MetricsService: metricsService}, - Account: &AccountModel{DB: db, MetricsService: metricsService}, + Account: &AccountModel{DB: db, MetricsService: metricsService}, + IngestStore: &IngestStoreModel{DB: db, MetricsService: metricsService}, + Payments: &PaymentModel{DB: db, MetricsService: metricsService}, }, nil } diff --git a/internal/data/payments.go b/internal/data/payments.go index 4c9f0f50..51ead947 100644 --- a/internal/data/payments.go +++ b/internal/data/payments.go @@ -2,7 +2,6 @@ package data import ( "context" - "database/sql" "errors" "fmt" "time" @@ -36,41 +35,6 @@ type Payment struct { MemoType string `db:"memo_type" json:"memoType"` } -func (m *PaymentModel) GetLatestLedgerSynced(ctx context.Context, cursorName string) (uint32, error) { - var lastSyncedLedger uint32 - start := time.Now() - err := m.DB.GetContext(ctx, &lastSyncedLedger, `SELECT value FROM ingest_store WHERE key = $1`, cursorName) - duration := time.Since(start).Seconds() - m.MetricsService.ObserveDBQueryDuration("SELECT", "ingest_store", duration) - m.MetricsService.IncDBQuery("SELECT", "ingest_store") - // First run, key does not exist yet - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - if err != nil { - return 0, fmt.Errorf("getting latest ledger synced for cursor %s: %w", cursorName, err) - } - - return lastSyncedLedger, nil -} - -func (m *PaymentModel) UpdateLatestLedgerSynced(ctx context.Context, cursorName string, ledger uint32) error { - const query = ` - INSERT INTO ingest_store (key, value) VALUES ($1, $2) - ON CONFLICT (key) DO UPDATE SET value = excluded.value - ` - start := time.Now() - _, err := m.DB.ExecContext(ctx, query, cursorName, ledger) - duration := time.Since(start).Seconds() - m.MetricsService.ObserveDBQueryDuration("INSERT", "ingest_store", duration) - if err != nil { - return fmt.Errorf("updating last synced ledger to %d: %w", ledger, err) - } - m.MetricsService.IncDBQuery("INSERT", "ingest_store") - - return nil -} - func (m *PaymentModel) AddPayment(ctx context.Context, tx db.Transaction, payment Payment) error { const query = ` INSERT INTO ingest_payments ( diff --git a/internal/data/payments_test.go b/internal/data/payments_test.go index 2e12e94d..3db96858 100644 --- a/internal/data/payments_test.go +++ b/internal/data/payments_test.go @@ -148,65 +148,6 @@ func TestPaymentModelAddPayment(t *testing.T) { }) } -func 
TestPaymentModelGetLatestLedgerSynced(t *testing.T) { - dbt := dbtest.Open(t) - defer dbt.Close() - dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) - require.NoError(t, err) - defer dbConnectionPool.Close() - - mockMetricsService := metrics.NewMockMetricsService() - mockMetricsService.On("ObserveDBQueryDuration", "SELECT", "ingest_store", mock.Anything).Return().Times(2) - mockMetricsService.On("IncDBQuery", "SELECT", "ingest_store").Return().Times(2) - defer mockMetricsService.AssertExpectations(t) - - ctx := context.Background() - m := &PaymentModel{ - DB: dbConnectionPool, - MetricsService: mockMetricsService, - } - - const key = "ingest_store_key" - lastSyncedLedger, err := m.GetLatestLedgerSynced(ctx, key) - require.NoError(t, err) - assert.Equal(t, uint32(0), lastSyncedLedger) - - _, err = dbConnectionPool.ExecContext(ctx, `INSERT INTO ingest_store (key, value) VALUES ($1, $2)`, key, 123) - require.NoError(t, err) - - lastSyncedLedger, err = m.GetLatestLedgerSynced(ctx, key) - require.NoError(t, err) - assert.Equal(t, uint32(123), lastSyncedLedger) -} - -func TestPaymentModelUpdateLatestLedgerSynced(t *testing.T) { - dbt := dbtest.Open(t) - defer dbt.Close() - dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) - require.NoError(t, err) - defer dbConnectionPool.Close() - - mockMetricsService := metrics.NewMockMetricsService() - mockMetricsService.On("ObserveDBQueryDuration", "INSERT", "ingest_store", mock.Anything).Return().Times(1) - mockMetricsService.On("IncDBQuery", "INSERT", "ingest_store").Return().Times(1) - defer mockMetricsService.AssertExpectations(t) - - ctx := context.Background() - m := &PaymentModel{ - DB: dbConnectionPool, - MetricsService: mockMetricsService, - } - - const key = "ingest_store_key" - err = m.UpdateLatestLedgerSynced(ctx, key, 123) - require.NoError(t, err) - - var lastSyncedLedger uint32 - err = m.DB.GetContext(ctx, &lastSyncedLedger, `SELECT value FROM ingest_store WHERE key = $1`, key) - require.NoError(t, err) - assert.Equal(t, uint32(123), lastSyncedLedger) -} - func TestPaymentModelGetPaymentsPaginated(t *testing.T) { dbt := dbtest.Open(t) defer dbt.Close() diff --git a/internal/services/ingest.go b/internal/services/ingest.go index d3f9dfbb..db356317 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -104,7 +104,7 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u if startLedger == 0 { var err error - startLedger, err = m.models.Payments.GetLatestLedgerSynced(ctx, m.ledgerCursorName) + startLedger, err = m.models.IngestStore.GetLatestLedgerSynced(ctx, m.ledgerCursorName) if err != nil { return fmt.Errorf("erorr getting start ledger: %w", err) } @@ -152,7 +152,7 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u } // update cursor - err = m.models.Payments.UpdateLatestLedgerSynced(ctx, m.ledgerCursorName, ingestLedger) + err = m.models.IngestStore.UpdateLatestLedgerSynced(ctx, m.ledgerCursorName, ingestLedger) if err != nil { return fmt.Errorf("updating latest synced ledger: %w", err) } diff --git a/internal/services/ingest_test.go b/internal/services/ingest_test.go index a4695bd2..271022f2 100644 --- a/internal/services/ingest_test.go +++ b/internal/services/ingest_test.go @@ -457,7 +457,7 @@ func TestIngest_LatestSyncedLedgerBehindRPC(t *testing.T) { mockRPCService.AssertNotCalled(t, "GetTransactions", int64(49), "", int64(50)) mockRPCService.AssertExpectations(t) - ledger, err := 
models.Payments.GetLatestLedgerSynced(context.Background(), "ingestionLedger") + ledger, err := models.IngestStore.GetLatestLedgerSynced(context.Background(), "ingestionLedger") require.NoError(t, err) assert.Equal(t, uint32(50), ledger) } @@ -561,7 +561,7 @@ func TestIngest_LatestSyncedLedgerAheadOfRPC(t *testing.T) { assert.Contains(t, logOutput, expectedLog) // Verify the ledger was eventually processed - ledger, err := models.Payments.GetLatestLedgerSynced(context.Background(), "ingestionLedger") + ledger, err := models.IngestStore.GetLatestLedgerSynced(context.Background(), "ingestionLedger") require.NoError(t, err) assert.Equal(t, uint32(100), ledger) From affdfd272f29539a17c4f36e76ca423d27c8ad5a Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Thu, 12 Jun 2025 15:01:57 -0700 Subject: [PATCH 04/27] Stop using `models.Payment.DB` and use `models.DB` instead. --- internal/data/models.go | 2 ++ internal/services/ingest.go | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/internal/data/models.go b/internal/data/models.go index f5d574e3..4f10f70c 100644 --- a/internal/data/models.go +++ b/internal/data/models.go @@ -8,6 +8,7 @@ import ( ) type Models struct { + DB db.ConnectionPool Account *AccountModel IngestStore *IngestStoreModel Payments *PaymentModel @@ -19,6 +20,7 @@ func NewModels(db db.ConnectionPool, metricsService metrics.MetricsService) (*Mo } return &Models{ + DB: db, Account: &AccountModel{DB: db, MetricsService: metricsService}, IngestStore: &IngestStoreModel{DB: db, MetricsService: metricsService}, Payments: &PaymentModel{DB: db, MetricsService: metricsService}, diff --git a/internal/services/ingest.go b/internal/services/ingest.go index db356317..a2768384 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -84,13 +84,13 @@ func NewIngestService( func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger uint32) error { // Acquire advisory lock to prevent multiple ingestion instances from running concurrently - if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.Payments.DB, advisoryLockID); err != nil { + if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { return fmt.Errorf("acquiring advisory lock: %w", err) } else if !lockAcquired { return fmt.Errorf("advisory lock not acquired") } defer func() { - if err := db.ReleaseAdvisoryLock(ctx, m.models.Payments.DB, advisoryLockID); err != nil { + if err := db.ReleaseAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { err = fmt.Errorf("releasing advisory lock: %w", err) log.Ctx(ctx).Error(err) } @@ -194,7 +194,7 @@ func (m *ingestService) GetLedgerTransactions(ledger int64) ([]entities.Transact } func (m *ingestService) ingestPayments(ctx context.Context, ledgerTransactions []entities.Transaction) error { - err := db.RunInTransaction(ctx, m.models.Payments.DB, nil, func(dbTx db.Transaction) error { + err := db.RunInTransaction(ctx, m.models.DB, nil, func(dbTx db.Transaction) error { paymentOpsIngested := 0 pathPaymentStrictSendOpsIngested := 0 pathPaymentStrictReceiveOpsIngested := 0 From 18ba6d32a76833f0b718633f34e8eea0ff651f33 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Thu, 12 Jun 2025 15:55:58 -0700 Subject: [PATCH 05/27] Create helper method to calculate the range of ledgers to fetch. 
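
A worked example of the clamping this helper performs, mirrored here for illustration only (not imported from the patch): with the RPC window [10, 20], a latest synced ledger of 15, and this patch's maxLedgerWindow of 1, the next range starts at 16 and ends at 17; once the latest synced ledger reaches 20, the caller is reported as in sync and no range is returned.

```go
package example

// maxLedgerWindow mirrors the constant introduced in this patch.
const maxLedgerWindow = 1

// nextLedgerRange mirrors the helper's clamping logic: start just after the
// latest synced ledger (but never before the RPC's oldest ledger), and end at
// most maxLedgerWindow later (but never past the RPC's newest ledger).
func nextLedgerRange(rpcOldest, rpcNewest, latestSynced uint32) (start, end uint32, inSync bool) {
	if latestSynced >= rpcNewest {
		return 0, 0, true
	}
	start = max(latestSynced+1, rpcOldest)
	end = min(start+maxLedgerWindow, rpcNewest)
	return start, end, false
}
```
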
--- internal/services/ingest.go | 25 ++++++++++++ internal/services/ingest_test.go | 69 ++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index a2768384..2c15a5a6 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -82,6 +82,31 @@ func NewIngestService( }, nil } +type LedgerSeqRange struct { + Start uint32 + End uint32 +} + +const maxLedgerWindow = 1 // NOTE: cannot be larger than 200 + +// getLedgerSeqRange returns a ledger sequence range to ingest. It takes into account: +// - the ledgers available in the RPC, +// - the latest ledger synced by the ingestion service, +// - the max ledger window to ingest. +// +// The returned ledger sequence range is inclusive of the start and end ledgers. +func getLedgerSeqRange(rpcOldestLedger, rpcNewestLedger, latestLedgerSynced uint32) (LedgerSeqRange, bool) { + if latestLedgerSynced >= rpcNewestLedger { + return LedgerSeqRange{}, true + } + + var ledgerRange LedgerSeqRange + ledgerRange.Start = max(latestLedgerSynced+1, rpcOldestLedger) + ledgerRange.End = min(ledgerRange.Start+maxLedgerWindow, rpcNewestLedger) + + return ledgerRange, false +} + func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger uint32) error { // Acquire advisory lock to prevent multiple ingestion instances from running concurrently if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { diff --git a/internal/services/ingest_test.go b/internal/services/ingest_test.go index 271022f2..ea1d11a2 100644 --- a/internal/services/ingest_test.go +++ b/internal/services/ingest_test.go @@ -32,6 +32,75 @@ const ( testFeeBumpTxXDR = "AAAABQAAAACRDhlb19H9O6EVQnLPSBX5kH4+ycO03nl6OOK1drSinwAAAAAAAAGQAAAAAgAAAAC//CoiAsv/SQfZHUYwg1/5F127eo+Rv6b9lf6GbIJNygAAAGQAAAAAAAAAAgAAAAEAAAAAAAAAAAAAAABoI7JqAAAAAAAAAAEAAAAAAAAAAAAAAADcrhjQMMeoVosXGSgLrC4WhXYLHl1HcUniEWKOGTyPEAAAAAAAmJaAAAAAAAAAAAFsgk3KAAAAQEYaesICeGfKcUiMEYoZZrKptMmMcW8636peWLpChKukfqTxSujQilalxe6ab+en9Bhf8iGMF8jb5JqIIYlYjQsAAAAAAAAAAXa0op8AAABADjCsmF/xr9jXwNStUM7YqXEd49qfbvGZPJPplANW7aiErkHWxEj6C2RVOyPyK8KBr1fjCleBSmDZjD1X0kkJCQ==" ) +func Test_getLedgerSeqRange(t *testing.T) { + testCases := []struct { + name string + latestLedgerSynced uint32 + rpcOldestLedger uint32 + rpcNewestLedger uint32 + wantInSync bool + wantResult LedgerSeqRange + }{ + { + name: "latest_synced_behind_rpc_oldest", + latestLedgerSynced: 5, + rpcOldestLedger: 10, + rpcNewestLedger: 20, + wantInSync: false, + wantResult: LedgerSeqRange{ + Start: 10, + End: 10 + maxLedgerWindow, + }, + }, + { + name: "latest_synced_equals_rpc_oldest", + latestLedgerSynced: 10, + rpcOldestLedger: 10, + rpcNewestLedger: 20, + wantInSync: false, + wantResult: LedgerSeqRange{ + Start: 11, + End: 11 + maxLedgerWindow, + }, + }, + { + name: "latest_synced_ahead_of_rpc_oldest", + rpcOldestLedger: 10, + rpcNewestLedger: 20, + latestLedgerSynced: 15, + wantInSync: false, + wantResult: LedgerSeqRange{ + Start: 16, + End: 16 + maxLedgerWindow, + }, + }, + { + name: "latest_synced_equals_rpc_newest", + rpcOldestLedger: 10, + rpcNewestLedger: 20, + latestLedgerSynced: 20, + wantInSync: true, + wantResult: LedgerSeqRange{}, + }, + { + name: "latest_synced_ahead_of_rpc_newest", + rpcOldestLedger: 10, + rpcNewestLedger: 20, + latestLedgerSynced: 25, + wantInSync: true, + wantResult: LedgerSeqRange{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ledgerRange, inSync := 
getLedgerSeqRange(tc.rpcOldestLedger, tc.rpcNewestLedger, tc.latestLedgerSynced) + assert.Equal(t, tc.wantResult, ledgerRange) + assert.Equal(t, tc.wantInSync, inSync) + }) + } +} + func TestGetLedgerTransactions(t *testing.T) { dbt := dbtest.Open(t) defer dbt.Close() From eced2d134b67a235640a96daa28eb66c480d6f76 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Thu, 12 Jun 2025 16:32:23 -0700 Subject: [PATCH 06/27] [WIP] implement getLedgers. --- go.mod | 1 + go.sum | 6 ++++-- internal/services/rpc_service.go | 21 +++++++++++++++++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 2ac71666..e50347f2 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( github.com/spf13/viper v1.20.1 github.com/stellar/go v0.0.0-20250417130535-10df2019148f github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2 + github.com/stellar/stellar-rpc v0.9.6-0.20250523162628-6bb9d7a387d5 github.com/stretchr/testify v1.10.0 golang.org/x/term v0.31.0 golang.org/x/text v0.24.0 diff --git a/go.sum b/go.sum index e3f0eac5..5181ccb9 100644 --- a/go.sum +++ b/go.sum @@ -185,8 +185,8 @@ github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsF github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2 h1:S4OC0+OBKz6mJnzuHioeEat74PuQ4Sgvbf8eus695sc= github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2/go.mod h1:8zLRYR5npGjaOXgPSKat5+oOh+UHd8OdbS18iqX9F6Y= -github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca h1:oR/RycYTFTVXzND5r4FdsvbnBn0HJXSVeNAnwaTXRwk= -github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -207,6 +207,8 @@ github.com/stellar/go v0.0.0-20250417130535-10df2019148f h1:+aEOop0hlhwYuIlJhKAS github.com/stellar/go v0.0.0-20250417130535-10df2019148f/go.mod h1:wE/ZDmjys55VprPR5qx5Ojx0cUi3f7MJ+dc5gzM+03k= github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2 h1:OzCVd0SV5qE3ZcDeSFCmOWLZfEWZ3Oe8KtmSOYKEVWE= github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2/go.mod h1:yoxyU/M8nl9LKeWIoBrbDPQ7Cy+4jxRcWcOayZ4BMps= +github.com/stellar/stellar-rpc v0.9.6-0.20250523162628-6bb9d7a387d5 h1:V+XezRLVHuk6c1nMkXkWjCwtoHN7F+DK86dK2kkNSZo= +github.com/stellar/stellar-rpc v0.9.6-0.20250523162628-6bb9d7a387d5/go.mod h1:21zn7aUjDQZih77MDIFfsVN5Cjdiv1sMh+V51xgZwRw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= diff --git a/internal/services/rpc_service.go b/internal/services/rpc_service.go index 63ae2aef..7bda541a 100644 --- a/internal/services/rpc_service.go +++ b/internal/services/rpc_service.go @@ -11,6 +11,7 @@ import ( "github.com/stellar/go/support/log" "github.com/stellar/go/xdr" + "github.com/stellar/stellar-rpc/protocol" "github.com/stellar/wallet-backend/internal/entities" 
"github.com/stellar/wallet-backend/internal/metrics" @@ -155,6 +156,26 @@ func (r *rpcService) GetLedgerEntries(keys []string) (entities.RPCGetLedgerEntri return result, nil } +func (r *rpcService) GetLedgers(startLedger int64, limit int) (protocol.GetLedgersResponse, error) { + resultBytes, err := r.sendRPCRequest("getLedgers", entities.RPCParams{ + StartLedger: startLedger, + Pagination: entities.RPCPagination{ + Limit: limit, + }, + }) + if err != nil { + return protocol.GetLedgersResponse{}, fmt.Errorf("sending getLedgers request: %w", err) + } + + var result protocol.GetLedgersResponse + err = json.Unmarshal(resultBytes, &result) + if err != nil { + return protocol.GetLedgersResponse{}, fmt.Errorf("parsing getLedgers result JSON: %w", err) + } + + return result, nil +} + func (r *rpcService) SendTransaction(transactionXDR string) (entities.RPCSendTransactionResult, error) { resultBytes, err := r.sendRPCRequest("sendTransaction", entities.RPCParams{Transaction: transactionXDR}) if err != nil { From 1416bad18a80289e74dfddb6c141936815f70fe0 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Fri, 13 Jun 2025 18:40:32 -0700 Subject: [PATCH 07/27] [WIP] start fetching ledgers. --- internal/ingest/ingest.go | 2 +- internal/services/ingest.go | 131 ++++++++++++++++++++++++++++--- internal/services/mocks.go | 8 +- internal/services/rpc_service.go | 20 +++-- 4 files changed, 142 insertions(+), 19 deletions(-) diff --git a/internal/ingest/ingest.go b/internal/ingest/ingest.go index 459b0c6a..4f9e0871 100644 --- a/internal/ingest/ingest.go +++ b/internal/ingest/ingest.go @@ -38,7 +38,7 @@ func Ingest(cfg Configs) error { } if err = ingestService.Run(ctx, uint32(cfg.StartLedger), uint32(cfg.EndLedger)); err != nil { - log.Ctx(ctx).Fatalf("Running ingest from %d to %d: %v", cfg.StartLedger, cfg.EndLedger, err) + log.Ctx(ctx).Fatalf("Error running `ingest` from %d to %d: %v", cfg.StartLedger, cfg.EndLedger, err) } return nil diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 2c15a5a6..64f518f7 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "os" + "os/signal" + "syscall" "time" "github.com/stellar/go/support/log" @@ -20,6 +23,8 @@ import ( "github.com/stellar/wallet-backend/internal/utils" ) +var ErrAlreadyInSync = errors.New("ingestion is already in sync") + const advisoryLockID = int(3747555612780983) const ( @@ -82,11 +87,6 @@ func NewIngestService( }, nil } -type LedgerSeqRange struct { - Start uint32 - End uint32 -} - const maxLedgerWindow = 1 // NOTE: cannot be larger than 200 // getLedgerSeqRange returns a ledger sequence range to ingest. It takes into account: @@ -95,14 +95,13 @@ const maxLedgerWindow = 1 // NOTE: cannot be larger than 200 // - the max ledger window to ingest. // // The returned ledger sequence range is inclusive of the start and end ledgers. 
-func getLedgerSeqRange(rpcOldestLedger, rpcNewestLedger, latestLedgerSynced uint32) (LedgerSeqRange, bool) { +func getLedgerSeqRange(rpcOldestLedger, rpcNewestLedger, latestLedgerSynced uint32) (ledgerRange LedgerSeqRange, inSync bool) { if latestLedgerSynced >= rpcNewestLedger { return LedgerSeqRange{}, true } - var ledgerRange LedgerSeqRange ledgerRange.Start = max(latestLedgerSynced+1, rpcOldestLedger) - ledgerRange.End = min(ledgerRange.Start+maxLedgerWindow, rpcNewestLedger) + ledgerRange.End = min(ledgerRange.Start+(maxLedgerWindow-1), rpcNewestLedger) return ledgerRange, false } @@ -112,7 +111,119 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { return fmt.Errorf("acquiring advisory lock: %w", err) } else if !lockAcquired { - return fmt.Errorf("advisory lock not acquired") + return errors.New("advisory lock not acquired") + } + defer func() { + if err := db.ReleaseAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { + err = fmt.Errorf("releasing advisory lock: %w", err) + log.Ctx(ctx).Error(err) + } + }() + + // get latest ledger synced, to use as a cursor + if startLedger == 0 { + var err error + startLedger, err = m.models.IngestStore.GetLatestLedgerSynced(ctx, m.ledgerCursorName) + if err != nil { + return fmt.Errorf("getting latest ledger synced: %w", err) + } + } + + ticker := time.NewTicker(time.Second * 5) + defer ticker.Stop() + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT) + defer signal.Stop(signalChan) + manualTriggerChan := make(chan any, 1) + + log.Ctx(ctx).Info("Starting ingestion loop") + for { + select { + case sig := <-signalChan: + log.Ctx(ctx).Info("Signal received") + return fmt.Errorf("received signal %s while waiting for RPC service to become healthy", sig) + case <-ctx.Done(): + log.Ctx(ctx).Info("Context cancelled") + return fmt.Errorf("context cancelled: %w", ctx.Err()) + case <-manualTriggerChan: + log.Ctx(ctx).Info("Manual trigger received") + fallthrough + case <-ticker.C: + log.Ctx(ctx).Info("Ticker ticked") + totalIngestionStart := time.Now() + // fetch ledgers + getLedgersResponse, err := m.fetchNextLedgersBatch(ctx, startLedger) + if err != nil { + return fmt.Errorf("fetching next ledgers batch: %w", err) + } + if errors.Is(err, ErrAlreadyInSync) { + log.Ctx(ctx).Info("Ingestion is already in sync, will retry in a few moments...") + continue + } + + // process ledgers + err = m.processLedgerResponse(ctx, getLedgersResponse) + if err != nil { + return fmt.Errorf("processing ledger response: %w", err) + } + + // update cursor + startLedger = getLedgersResponse.Ledgers[len(getLedgersResponse.Ledgers)-1].Sequence + err = m.models.IngestStore.UpdateLatestLedgerSynced(ctx, m.ledgerCursorName, startLedger) + if err != nil { + return fmt.Errorf("updating latest synced ledger: %w", err) + } + m.metricsService.SetLatestLedgerIngested(float64(getLedgersResponse.LatestLedger)) + m.metricsService.ObserveIngestionDuration(totalIngestionPrometheusLabel, time.Since(totalIngestionStart).Seconds()) + + // Trigger another ingestion immediately if we're behind + rpcHealth, err := m.rpcService.GetHealth() + if err != nil { + log.Ctx(ctx).Errorf("Failed to get RPC health to check if we're behind: %v", err) + continue + } + if rpcHealth.LatestLedger-startLedger > 1 { + manualTriggerChan <- true + } + } + } +} + +func (m *ingestService) processLedgerResponse(ctx 
context.Context, getLedgersResponse GetLedgersResponse) error { + log.Ctx(ctx).Warnf("🚧 TODO: process & ingest ledger response") + var sequences []uint32 + for _, ledger := range getLedgersResponse.Ledgers { + sequences = append(sequences, ledger.Sequence) + } + log.Ctx(ctx).Debugf("sequences(%d): %v", len(sequences), sequences) + return nil +} + +func (m *ingestService) fetchNextLedgersBatch(ctx context.Context, startLedger uint32) (GetLedgersResponse, error) { + rpcHealth, err := m.rpcService.GetHealth() + if err != nil { + return GetLedgersResponse{}, fmt.Errorf("getting rpc health: %w", err) + } + ledgerSeqRange, inSync := getLedgerSeqRange(rpcHealth.OldestLedger, rpcHealth.LatestLedger, startLedger) + log.Ctx(ctx).Debugf("ledgerSeqRange: %v", ledgerSeqRange) + if inSync { + return GetLedgersResponse{}, ErrAlreadyInSync + } + + getLedgersResponse, err := m.rpcService.GetLedgers(ledgerSeqRange) + if err != nil { + return GetLedgersResponse{}, fmt.Errorf("getting ledgers: %w", err) + } + + return getLedgersResponse, nil +} + +func (m *ingestService) RunOld(ctx context.Context, startLedger uint32, endLedger uint32) error { + // Acquire advisory lock to prevent multiple ingestion instances from running concurrently + if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { + return fmt.Errorf("acquiring advisory lock: %w", err) + } else if !lockAcquired { + return errors.New("advisory lock not acquired") } defer func() { if err := db.ReleaseAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { @@ -131,7 +242,7 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u var err error startLedger, err = m.models.IngestStore.GetLatestLedgerSynced(ctx, m.ledgerCursorName) if err != nil { - return fmt.Errorf("erorr getting start ledger: %w", err) + return fmt.Errorf("getting latest ledger synced: %w", err) } } diff --git a/internal/services/mocks.go b/internal/services/mocks.go index 2ec814b8..f33779bc 100644 --- a/internal/services/mocks.go +++ b/internal/services/mocks.go @@ -3,9 +3,8 @@ package services import ( "context" - "github.com/stretchr/testify/mock" - "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/mock" "github.com/stellar/wallet-backend/internal/entities" ) @@ -45,6 +44,11 @@ func (r *RPCServiceMock) GetHealth() (entities.RPCGetHealthResult, error) { return args.Get(0).(entities.RPCGetHealthResult), args.Error(1) } +func (r *RPCServiceMock) GetLedgers(ledgerSeqRange LedgerSeqRange) (GetLedgersResponse, error) { + args := r.Called(ledgerSeqRange) + return args.Get(0).(GetLedgersResponse), args.Error(1) +} + func (r *RPCServiceMock) GetLedgerEntries(keys []string) (entities.RPCGetLedgerEntriesResult, error) { args := r.Called(keys) return args.Get(0).(entities.RPCGetLedgerEntriesResult), args.Error(1) diff --git a/internal/services/rpc_service.go b/internal/services/rpc_service.go index 7bda541a..39a6c1f8 100644 --- a/internal/services/rpc_service.go +++ b/internal/services/rpc_service.go @@ -29,6 +29,7 @@ type RPCService interface { GetTransactions(startLedger int64, startCursor string, limit int) (entities.RPCGetTransactionsResult, error) SendTransaction(transactionXDR string) (entities.RPCSendTransactionResult, error) GetHealth() (entities.RPCGetHealthResult, error) + GetLedgers(ledgerSeqRange LedgerSeqRange) (GetLedgersResponse, error) GetLedgerEntries(keys []string) (entities.RPCGetLedgerEntriesResult, error) GetAccountLedgerSequence(address string) (int64, error) GetHeartbeatChannel() chan 
entities.RPCGetHealthResult @@ -156,21 +157,28 @@ func (r *rpcService) GetLedgerEntries(keys []string) (entities.RPCGetLedgerEntri return result, nil } -func (r *rpcService) GetLedgers(startLedger int64, limit int) (protocol.GetLedgersResponse, error) { +type LedgerSeqRange struct { + Start uint32 + End uint32 +} + +type GetLedgersResponse protocol.GetLedgersResponse + +func (r *rpcService) GetLedgers(ledgerSeqRange LedgerSeqRange) (GetLedgersResponse, error) { resultBytes, err := r.sendRPCRequest("getLedgers", entities.RPCParams{ - StartLedger: startLedger, + StartLedger: int64(ledgerSeqRange.Start), Pagination: entities.RPCPagination{ - Limit: limit, + Limit: int(ledgerSeqRange.End - ledgerSeqRange.Start + 1), }, }) if err != nil { - return protocol.GetLedgersResponse{}, fmt.Errorf("sending getLedgers request: %w", err) + return GetLedgersResponse{}, fmt.Errorf("sending getLedgers request: %w", err) } - var result protocol.GetLedgersResponse + var result GetLedgersResponse err = json.Unmarshal(resultBytes, &result) if err != nil { - return protocol.GetLedgersResponse{}, fmt.Errorf("parsing getLedgers result JSON: %w", err) + return GetLedgersResponse{}, fmt.Errorf("parsing getLedgers result JSON: %w", err) } return result, nil From 408e252918a37280389eef386f8858ac59bce85b Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Mon, 16 Jun 2025 15:39:37 -0700 Subject: [PATCH 08/27] Start fetching ledger batches. --- internal/services/ingest.go | 66 +++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 64f518f7..333b3925 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -87,7 +87,7 @@ func NewIngestService( }, nil } -const maxLedgerWindow = 1 // NOTE: cannot be larger than 200 +const maxLedgerWindow = 200 // NOTE: cannot be larger than 200 // getLedgerSeqRange returns a ledger sequence range to ingest. 
It takes into account: // - the ledgers available in the RPC, @@ -129,12 +129,14 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u } } - ticker := time.NewTicker(time.Second * 5) + const tickerDuration = time.Second * 5 + ticker := time.NewTicker(tickerDuration) defer ticker.Stop() signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT) defer signal.Stop(signalChan) manualTriggerChan := make(chan any, 1) + defer close(manualTriggerChan) log.Ctx(ctx).Info("Starting ingestion loop") for { @@ -147,45 +149,38 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u return fmt.Errorf("context cancelled: %w", ctx.Err()) case <-manualTriggerChan: log.Ctx(ctx).Info("Manual trigger received") - fallthrough case <-ticker.C: log.Ctx(ctx).Info("Ticker ticked") - totalIngestionStart := time.Now() - // fetch ledgers - getLedgersResponse, err := m.fetchNextLedgersBatch(ctx, startLedger) - if err != nil { - return fmt.Errorf("fetching next ledgers batch: %w", err) - } - if errors.Is(err, ErrAlreadyInSync) { - log.Ctx(ctx).Info("Ingestion is already in sync, will retry in a few moments...") - continue - } + } - // process ledgers - err = m.processLedgerResponse(ctx, getLedgersResponse) - if err != nil { - return fmt.Errorf("processing ledger response: %w", err) - } + totalIngestionStart := time.Now() + // fetch ledgers + getLedgersResponse, err := m.fetchNextLedgersBatch(ctx, startLedger) + if err != nil { + return fmt.Errorf("fetching next ledgers batch: %w", err) + } + if errors.Is(err, ErrAlreadyInSync) { + log.Ctx(ctx).Info("Ingestion is already in sync, will retry in a few moments...") + continue + } - // update cursor - startLedger = getLedgersResponse.Ledgers[len(getLedgersResponse.Ledgers)-1].Sequence - err = m.models.IngestStore.UpdateLatestLedgerSynced(ctx, m.ledgerCursorName, startLedger) - if err != nil { - return fmt.Errorf("updating latest synced ledger: %w", err) - } - m.metricsService.SetLatestLedgerIngested(float64(getLedgersResponse.LatestLedger)) - m.metricsService.ObserveIngestionDuration(totalIngestionPrometheusLabel, time.Since(totalIngestionStart).Seconds()) + // process ledgers + err = m.processLedgerResponse(ctx, getLedgersResponse) + if err != nil { + return fmt.Errorf("processing ledger response: %w", err) + } - // Trigger another ingestion immediately if we're behind - rpcHealth, err := m.rpcService.GetHealth() - if err != nil { - log.Ctx(ctx).Errorf("Failed to get RPC health to check if we're behind: %v", err) - continue - } - if rpcHealth.LatestLedger-startLedger > 1 { - manualTriggerChan <- true - } + // update cursor + startLedger = getLedgersResponse.Ledgers[len(getLedgersResponse.Ledgers)-1].Sequence + err = m.models.IngestStore.UpdateLatestLedgerSynced(ctx, m.ledgerCursorName, startLedger) + if err != nil { + return fmt.Errorf("updating latest synced ledger: %w", err) } + m.metricsService.SetLatestLedgerIngested(float64(getLedgersResponse.LatestLedger)) + m.metricsService.ObserveIngestionDuration(totalIngestionPrometheusLabel, time.Since(totalIngestionStart).Seconds()) + + ticker.Reset(tickerDuration) + manualTriggerChan <- nil } } @@ -199,6 +194,7 @@ func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersRes return nil } +// fetchNextLedgersBatch fetches the next batch of ledgers from the RPC service. 
func (m *ingestService) fetchNextLedgersBatch(ctx context.Context, startLedger uint32) (GetLedgersResponse, error) { rpcHealth, err := m.rpcService.GetHealth() if err != nil { From 8d0acbe772cb0a265dea3d27b1e72db710694676 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Mon, 16 Jun 2025 16:11:32 -0700 Subject: [PATCH 09/27] Add parallelization when processing ledgers --- internal/services/ingest.go | 53 ++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 4 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 333b3925..805d77f1 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -9,9 +9,11 @@ import ( "syscall" "time" + "github.com/alitto/pond" "github.com/stellar/go/support/log" "github.com/stellar/go/txnbuild" "github.com/stellar/go/xdr" + "github.com/stellar/stellar-rpc/protocol" "github.com/stellar/wallet-backend/internal/apptracker" "github.com/stellar/wallet-backend/internal/data" @@ -184,16 +186,59 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u } } +type result[T any] struct { + ledger protocol.LedgerInfo + processedData T + err error +} + func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersResponse GetLedgersResponse) error { log.Ctx(ctx).Warnf("🚧 TODO: process & ingest ledger response") - var sequences []uint32 - for _, ledger := range getLedgersResponse.Ledgers { - sequences = append(sequences, ledger.Sequence) + + // Create a worker pool with + const poolSize = 4 + pool := pond.New(poolSize, maxLedgerWindow, pond.Context(ctx)) + + // Create a slice to store results + results := make([]result[any], len(getLedgersResponse.Ledgers)) + var errs []error + + // Submit tasks to the pool + for i, ledger := range getLedgersResponse.Ledgers { + ledger := ledger // Create a new variable to avoid closure issues + pool.Submit(func() { + processedData, err := m.processLedger(ctx, ledger) + if err != nil { + err = fmt.Errorf("processing ledger %d: %w", ledger.Sequence, err) + errs = append(errs, err) + } + results[i] = result[any]{ + ledger: ledger, + processedData: processedData, + err: err, + } + }) } - log.Ctx(ctx).Debugf("sequences(%d): %v", len(sequences), sequences) + + // Wait for all tasks to complete + pool.StopAndWait() + + if len(errs) > 0 { + return fmt.Errorf("processing ledgers: %w", errors.Join(errs...)) + } + + for _, result := range results { + log.Ctx(ctx).Debugf("Processed ledger %d", result.ledger.Sequence) + } + return nil } +func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.LedgerInfo) (any, error) { + log.Ctx(ctx).Warnf("🚧 TODO: process ledger %d", ledgerInfo.Sequence) + return nil, nil +} + // fetchNextLedgersBatch fetches the next batch of ledgers from the RPC service. 
func (m *ingestService) fetchNextLedgersBatch(ctx context.Context, startLedger uint32) (GetLedgersResponse, error) { rpcHealth, err := m.rpcService.GetHealth() From 02367a3c6c4ca87b8c854d722c7b503161a82382 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Mon, 16 Jun 2025 16:17:31 -0700 Subject: [PATCH 10/27] Fix tests --- internal/services/ingest_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/services/ingest_test.go b/internal/services/ingest_test.go index ea1d11a2..503bfb95 100644 --- a/internal/services/ingest_test.go +++ b/internal/services/ingest_test.go @@ -49,7 +49,7 @@ func Test_getLedgerSeqRange(t *testing.T) { wantInSync: false, wantResult: LedgerSeqRange{ Start: 10, - End: 10 + maxLedgerWindow, + End: min(10+maxLedgerWindow, 20), }, }, { @@ -60,7 +60,7 @@ func Test_getLedgerSeqRange(t *testing.T) { wantInSync: false, wantResult: LedgerSeqRange{ Start: 11, - End: 11 + maxLedgerWindow, + End: min(11+maxLedgerWindow, 20), }, }, { @@ -71,7 +71,7 @@ func Test_getLedgerSeqRange(t *testing.T) { wantInSync: false, wantResult: LedgerSeqRange{ Start: 16, - End: 16 + maxLedgerWindow, + End: min(16+maxLedgerWindow, 20), }, }, { From aa1af4dda9b2dc2c8eaeadcca5d4a2286d9dd337 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Mon, 16 Jun 2025 16:23:53 -0700 Subject: [PATCH 11/27] Fix tests by making them point to the RunOld method. --- internal/services/ingest.go | 1 + internal/services/ingest_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 805d77f1..701d9007 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -259,6 +259,7 @@ func (m *ingestService) fetchNextLedgersBatch(ctx context.Context, startLedger u return getLedgersResponse, nil } +// RunOld is the old ingestion loop. It is being deprecated. 
func (m *ingestService) RunOld(ctx context.Context, startLedger uint32, endLedger uint32) error { // Acquire advisory lock to prevent multiple ingestion instances from running concurrently if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { diff --git a/internal/services/ingest_test.go b/internal/services/ingest_test.go index 503bfb95..b9fc5786 100644 --- a/internal/services/ingest_test.go +++ b/internal/services/ingest_test.go @@ -427,7 +427,7 @@ func TestIngestPayments(t *testing.T) { }) } -func TestIngest_LatestSyncedLedgerBehindRPC(t *testing.T) { +func Test_Ingest_RunOld_LatestSyncedLedgerBehindRPC(t *testing.T) { dbt := dbtest.Open(t) dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) require.NoError(t, err) @@ -520,7 +520,7 @@ func TestIngest_LatestSyncedLedgerBehindRPC(t *testing.T) { } mockRPCService.On("GetHeartbeatChannel").Return(heartbeatChan) - err = ingestService.Run(ctx, uint32(49), uint32(50)) + err = ingestService.RunOld(ctx, uint32(49), uint32(50)) require.NoError(t, err) mockRPCService.AssertNotCalled(t, "GetTransactions", int64(49), "", int64(50)) @@ -531,7 +531,7 @@ func TestIngest_LatestSyncedLedgerBehindRPC(t *testing.T) { assert.Equal(t, uint32(50), ledger) } -func TestIngest_LatestSyncedLedgerAheadOfRPC(t *testing.T) { +func Test_Ingest_RunOld_LatestSyncedLedgerAheadOfRPC(t *testing.T) { dbt := dbtest.Open(t) dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) require.NoError(t, err) @@ -621,7 +621,7 @@ func TestIngest_LatestSyncedLedgerAheadOfRPC(t *testing.T) { mockAppTracker.On("CaptureMessage", mock.Anything).Maybe().Return(nil) // Start ingestion at ledger 100 (ahead of RPC's initial position at 50) - err = ingestService.Run(ctx, uint32(100), uint32(100)) + err = ingestService.RunOld(ctx, uint32(100), uint32(100)) require.NoError(t, err) // Verify the debug log message was written From d3ff615ccea757595c62b7fd15fd0de26d9d882f Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Mon, 16 Jun 2025 16:57:09 -0700 Subject: [PATCH 12/27] Remove advisory lock from the deprecated function. --- internal/services/ingest.go | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 701d9007..85929bc8 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -261,19 +261,6 @@ func (m *ingestService) fetchNextLedgersBatch(ctx context.Context, startLedger u // RunOld is the old ingestion loop. It is being deprecated. 
func (m *ingestService) RunOld(ctx context.Context, startLedger uint32, endLedger uint32) error { - // Acquire advisory lock to prevent multiple ingestion instances from running concurrently - if lockAcquired, err := db.AcquireAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { - return fmt.Errorf("acquiring advisory lock: %w", err) - } else if !lockAcquired { - return errors.New("advisory lock not acquired") - } - defer func() { - if err := db.ReleaseAdvisoryLock(ctx, m.models.DB, advisoryLockID); err != nil { - err = fmt.Errorf("releasing advisory lock: %w", err) - log.Ctx(ctx).Error(err) - } - }() - manualTriggerChannel := make(chan any, 1) go m.rpcService.TrackRPCServiceHealth(ctx, manualTriggerChannel) ingestHeartbeatChannel := make(chan any, 1) From 06cc5de05406bf2123fade79a556578a2c7699ea Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Mon, 16 Jun 2025 16:57:29 -0700 Subject: [PATCH 13/27] Add tests for rpcService.getLedgers() --- internal/services/rpc_service.go | 42 ++++----- internal/services/rpc_service_test.go | 117 +++++++++++++++++++++++++- 2 files changed, 137 insertions(+), 22 deletions(-) diff --git a/internal/services/rpc_service.go b/internal/services/rpc_service.go index 39a6c1f8..13bd7fd4 100644 --- a/internal/services/rpc_service.go +++ b/internal/services/rpc_service.go @@ -141,27 +141,6 @@ func (r *rpcService) GetHealth() (entities.RPCGetHealthResult, error) { return result, nil } -func (r *rpcService) GetLedgerEntries(keys []string) (entities.RPCGetLedgerEntriesResult, error) { - resultBytes, err := r.sendRPCRequest("getLedgerEntries", entities.RPCParams{ - LedgerKeys: keys, - }) - if err != nil { - return entities.RPCGetLedgerEntriesResult{}, fmt.Errorf("sending getLedgerEntries request: %w", err) - } - - var result entities.RPCGetLedgerEntriesResult - err = json.Unmarshal(resultBytes, &result) - if err != nil { - return entities.RPCGetLedgerEntriesResult{}, fmt.Errorf("parsing getLedgerEntries result JSON: %w", err) - } - return result, nil -} - -type LedgerSeqRange struct { - Start uint32 - End uint32 -} - type GetLedgersResponse protocol.GetLedgersResponse func (r *rpcService) GetLedgers(ledgerSeqRange LedgerSeqRange) (GetLedgersResponse, error) { @@ -184,6 +163,27 @@ func (r *rpcService) GetLedgers(ledgerSeqRange LedgerSeqRange) (GetLedgersRespon return result, nil } +func (r *rpcService) GetLedgerEntries(keys []string) (entities.RPCGetLedgerEntriesResult, error) { + resultBytes, err := r.sendRPCRequest("getLedgerEntries", entities.RPCParams{ + LedgerKeys: keys, + }) + if err != nil { + return entities.RPCGetLedgerEntriesResult{}, fmt.Errorf("sending getLedgerEntries request: %w", err) + } + + var result entities.RPCGetLedgerEntriesResult + err = json.Unmarshal(resultBytes, &result) + if err != nil { + return entities.RPCGetLedgerEntriesResult{}, fmt.Errorf("parsing getLedgerEntries result JSON: %w", err) + } + return result, nil +} + +type LedgerSeqRange struct { + Start uint32 + End uint32 +} + func (r *rpcService) SendTransaction(transactionXDR string) (entities.RPCSendTransactionResult, error) { resultBytes, err := r.sendRPCRequest("sendTransaction", entities.RPCParams{Transaction: transactionXDR}) if err != nil { diff --git a/internal/services/rpc_service_test.go b/internal/services/rpc_service_test.go index 1fcfaa8c..2f3c4ba7 100644 --- a/internal/services/rpc_service_test.go +++ b/internal/services/rpc_service_test.go @@ -15,6 +15,7 @@ import ( "github.com/stellar/go/network" "github.com/stellar/go/support/log" "github.com/stellar/go/xdr" 
+ "github.com/stellar/stellar-rpc/protocol" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -320,7 +321,7 @@ func Test_rpcService_SimulateTransaction(t *testing.T) { require.NoError(t, err) defer dbConnectionPool.Close() - const rpcURL = "http://api.vibrantapp.com/soroban/rpc" + const rpcURL = "https://test.com/soroban-rpc" const transactionXDR = "AAAAAgAAAACnroqZn2p1MGBHWWDhZOaG3H73hXYtdc4Jz27c287ITQAAAGQAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAABoCqJrAAAAAAAAAAEAAAAAAAAAGAAAAAAAAAAB15KLcsJwPM/q9+uf9O9NUEpVqLl5/JtFDqLIQrTRzmEAAAAIdHJhbnNmZXIAAAADAAAAEgAAAAAAAAAAEG5rRhQ6188E3xuAkXVVe82tgIW2yZyDyH43k9/YHKUAAAASAAAAAAAAAAAQbmtGFDrXzwTfG4CRdVV7za2AhbbJnIPIfjeT39gcpQAAAAoAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA" testCases := []struct { @@ -740,6 +741,120 @@ func TestSendGetHealth(t *testing.T) { }) } +func Test_rpcService_GetLedgers(t *testing.T) { + dbt := dbtest.Open(t) + defer dbt.Close() + dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool.Close() + + const rpcURL = "https://test.com/soroban-rpc" + + t.Run("🟢successful", func(t *testing.T) { + mockMetricsService := metrics.NewMockMetricsService() + mockMetricsService. + On("IncRPCRequests", "getLedgers").Once(). + On("IncRPCEndpointSuccess", "getLedgers").Once(). + On("ObserveRPCRequestDuration", "getLedgers", mock.AnythingOfType("float64")).Once() + defer mockMetricsService.AssertExpectations(t) + + payload := map[string]any{ + "jsonrpc": "2.0", + "id": 1, + "method": "getLedgers", + "params": entities.RPCParams{ + StartLedger: 1541075, + Pagination: entities.RPCPagination{ + Limit: 1, + }, + }, + } + jsonData, err := json.Marshal(payload) + require.NoError(t, err) + + mockHTTPClient := utils.MockHTTPClient{} + mockHTTPClient. + On("Post", rpcURL, "application/json", bytes.NewBuffer(jsonData)). 
+ Return(&http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{ + "jsonrpc": "2.0", + "id": 8675309, + "result": { + "ledgers": [ + { + "hash": "f0174cd1dad2b9304af43c9a3ed30900e738d2d0f88940d49d776c51bcfa610d", + "sequence": 1541075, + "ledgerCloseTime": "1750117639", + "headerXdr": "8BdM0drSuTBK9DyaPtMJAOc40tD4iUDUnXdsUbz6YQ0AAAAWZfBjwPwfuchZSCct99MtE4yNfMuH0ishqJI3+m+vz9aw7CHKFwey/sLAvUFPrqMz3wS3efPbVR4hGaKsW7b6kwAAAABoUK0HAAAAAAAAAAEAAAAAqCTNGLyddQZNKZpbW6ykO8OqLzJpOBU9jC+btctt8DMAAABACMD13DKnK60i/BL7Mp0H4vWLhEXK1tzZ1h3AHKRV56KZSoz2ybKa2P9fQuWdvUhVXTroQz1LL3zRHeoyUPp4C98/YZgEqS/bQFcZLcQ910jqd4rcUrxJjOgFJMAUuBEZ08HM8t09UZJ1Kqg6cuLLvK6IV3+m24+jVo/lTAPkWJEAF4PTDeC2s6dkAAAAAAEKk0jb3QAAAAAAAAAAAAB+pwAAAGQATEtAAAAAyErGE6gBB1x9QL8XPn3RWBGlWkkhSzINUNtpHEgvUn0Y62DCzECDr9D1930epUoKD/aiJy08uliELqIZSfcqk1u0dEs3T0QyOh071TGfSuMRy+VYJD1rKa+7JQTYNwADLhRpfTb42PTjRzh3ChN9hbTQIDrhnu7KnZl+cPDflR4eAAAAAAAAAAA=", + "metadataXdr": "AAAAAQAAAADwF0zR2tK5MEr0PJo+0wkA5zjS0PiJQNSdd2xRvPphDQAAABZl8GPA/B+5yFlIJy330y0TjI18y4fSKyGokjf6b6/P1rDsIcoXB7L+wsC9QU+uozPfBLd589tVHiEZoqxbtvqTAAAAAGhQrQcAAAAAAAAAAQAAAACoJM0YvJ11Bk0pmltbrKQ7w6ovMmk4FT2ML5u1y23wMwAAAEAIwPXcMqcrrSL8EvsynQfi9YuERcrW3NnWHcAcpFXnoplKjPbJsprY/19C5Z29SFVdOuhDPUsvfNEd6jJQ+ngL3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnTwczy3T1RknUqqDpy4su8rohXf6bbj6NWj+VMA+RYkQAXg9MN4Lazp2QAAAAAAQqTSNvdAAAAAAAAAAAAAH6nAAAAZABMS0AAAADISsYTqAEHXH1Avxc+fdFYEaVaSSFLMg1Q22kcSC9SfRjrYMLMQIOv0PX3fR6lSgoP9qInLTy6WIQuohlJ9yqTW7R0SzdPRDI6HTvVMZ9K4xHL5VgkPWspr7slBNg3AAMuFGl9NvjY9ONHOHcKE32FtNAgOuGe7sqdmX5w8N+VHh4AAAAAAAAAAAAAAAFl8GPA/B+5yFlIJy330y0TjI18y4fSKyGokjf6b6/P1gAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/ZvdkAAAAAAAAAAA==" + } + ], + "latestLedger": 1541079, + "latestLedgerCloseTime": 1750117659, + "oldestLedger": 1420120, + "oldestLedgerCloseTime": 1749512372, + "cursor": "1541075" + } + }`)), + }, nil). 
+ Once() + + rpcService, err := NewRPCService(rpcURL, network.TestNetworkPassphrase, &mockHTTPClient, mockMetricsService) + require.NoError(t, err) + + result, err := rpcService.GetLedgers(LedgerSeqRange{ + Start: 1541075, + End: 1541075, + }) + require.NoError(t, err) + + assert.Equal(t, GetLedgersResponse{ + LatestLedger: 1541079, + LatestLedgerCloseTime: 1750117659, + OldestLedger: 1420120, + OldestLedgerCloseTime: 1749512372, + Cursor: "1541075", + Ledgers: []protocol.LedgerInfo{ + { + Sequence: 1541075, + Hash: "f0174cd1dad2b9304af43c9a3ed30900e738d2d0f88940d49d776c51bcfa610d", + LedgerHeader: "8BdM0drSuTBK9DyaPtMJAOc40tD4iUDUnXdsUbz6YQ0AAAAWZfBjwPwfuchZSCct99MtE4yNfMuH0ishqJI3+m+vz9aw7CHKFwey/sLAvUFPrqMz3wS3efPbVR4hGaKsW7b6kwAAAABoUK0HAAAAAAAAAAEAAAAAqCTNGLyddQZNKZpbW6ykO8OqLzJpOBU9jC+btctt8DMAAABACMD13DKnK60i/BL7Mp0H4vWLhEXK1tzZ1h3AHKRV56KZSoz2ybKa2P9fQuWdvUhVXTroQz1LL3zRHeoyUPp4C98/YZgEqS/bQFcZLcQ910jqd4rcUrxJjOgFJMAUuBEZ08HM8t09UZJ1Kqg6cuLLvK6IV3+m24+jVo/lTAPkWJEAF4PTDeC2s6dkAAAAAAEKk0jb3QAAAAAAAAAAAAB+pwAAAGQATEtAAAAAyErGE6gBB1x9QL8XPn3RWBGlWkkhSzINUNtpHEgvUn0Y62DCzECDr9D1930epUoKD/aiJy08uliELqIZSfcqk1u0dEs3T0QyOh071TGfSuMRy+VYJD1rKa+7JQTYNwADLhRpfTb42PTjRzh3ChN9hbTQIDrhnu7KnZl+cPDflR4eAAAAAAAAAAA=", + LedgerCloseTime: 1750117639, + LedgerMetadata: "AAAAAQAAAADwF0zR2tK5MEr0PJo+0wkA5zjS0PiJQNSdd2xRvPphDQAAABZl8GPA/B+5yFlIJy330y0TjI18y4fSKyGokjf6b6/P1rDsIcoXB7L+wsC9QU+uozPfBLd589tVHiEZoqxbtvqTAAAAAGhQrQcAAAAAAAAAAQAAAACoJM0YvJ11Bk0pmltbrKQ7w6ovMmk4FT2ML5u1y23wMwAAAEAIwPXcMqcrrSL8EvsynQfi9YuERcrW3NnWHcAcpFXnoplKjPbJsprY/19C5Z29SFVdOuhDPUsvfNEd6jJQ+ngL3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnTwczy3T1RknUqqDpy4su8rohXf6bbj6NWj+VMA+RYkQAXg9MN4Lazp2QAAAAAAQqTSNvdAAAAAAAAAAAAAH6nAAAAZABMS0AAAADISsYTqAEHXH1Avxc+fdFYEaVaSSFLMg1Q22kcSC9SfRjrYMLMQIOv0PX3fR6lSgoP9qInLTy6WIQuohlJ9yqTW7R0SzdPRDI6HTvVMZ9K4xHL5VgkPWspr7slBNg3AAMuFGl9NvjY9ONHOHcKE32FtNAgOuGe7sqdmX5w8N+VHh4AAAAAAAAAAAAAAAFl8GPA/B+5yFlIJy330y0TjI18y4fSKyGokjf6b6/P1gAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/ZvdkAAAAAAAAAAA==", + }, + }, + }, result) + }) + + t.Run("🔴rpc_request_fails", func(t *testing.T) { + mockMetricsService := metrics.NewMockMetricsService() + mockMetricsService. + On("IncRPCRequests", "getLedgers").Once(). + On("IncRPCEndpointFailure", "getLedgers").Once(). + On("ObserveRPCRequestDuration", "getLedgers", mock.AnythingOfType("float64")).Once() + defer mockMetricsService.AssertExpectations(t) + + mockHTTPClient := utils.MockHTTPClient{} + mockHTTPClient. + On("Post", rpcURL, "application/json", mock.Anything). + Return(&http.Response{}, errors.New("connection failed")). 
+ Once() + + rpcService, err := NewRPCService(rpcURL, network.TestNetworkPassphrase, &mockHTTPClient, mockMetricsService) + require.NoError(t, err) + + result, err := rpcService.GetLedgers(LedgerSeqRange{ + Start: 1541075, + End: 1541075, + }) + require.Error(t, err) + + assert.Equal(t, GetLedgersResponse{}, result) + assert.Equal(t, "sending getLedgers request: sending POST request to RPC: connection failed", err.Error()) + }) +} + func TestTrackRPCServiceHealth_HealthyService(t *testing.T) { dbt := dbtest.Open(t) defer dbt.Close() From c172adee9e27b69479b8357073d2ad421aedb85d Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 08:50:03 -0700 Subject: [PATCH 14/27] [WIP] start tx and ops processors --- go.mod | 55 +++++- go.sum | 211 +++++++++++++++++++++- internal/indexer/types/types.go | 42 ++++- internal/ingest/participants.go | 307 ++++++++++++++++++++++++++++++++ 4 files changed, 611 insertions(+), 4 deletions(-) create mode 100644 internal/ingest/participants.go diff --git a/go.mod b/go.mod index e50347f2..028573c1 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,7 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 github.com/spf13/viper v1.20.1 - github.com/stellar/go v0.0.0-20250417130535-10df2019148f + github.com/stellar/go v0.0.0-20250613214159-65b2d613a208 github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2 github.com/stellar/stellar-rpc v0.9.6-0.20250523162628-6bb9d7a387d5 github.com/stretchr/testify v1.10.0 @@ -29,20 +29,50 @@ require ( ) require ( + cel.dev/expr v0.16.1 // indirect + cloud.google.com/go v0.116.0 // indirect + cloud.google.com/go/auth v0.13.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.2.2 // indirect + cloud.google.com/go/monitoring v1.21.2 // indirect + cloud.google.com/go/storage v1.49.0 // indirect github.com/BurntSushi/toml v1.3.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect + github.com/creachadair/jrpc2 v1.2.0 // indirect + github.com/creachadair/mds v0.13.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da // indirect + github.com/djherbis/fscache v0.10.1 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/gabriel-vasile/mimetype v1.4.8 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-playground/locales v0.14.1 // indirect 
github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.14.1 // indirect github.com/gorilla/schema v1.4.1 // indirect + github.com/guregu/null v4.0.0+incompatible // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.18.0 // indirect @@ -50,8 +80,10 @@ require ( github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.62.0 // indirect @@ -65,12 +97,33 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.29.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.37.0 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sync v0.13.0 // indirect golang.org/x/sys v0.32.0 // indirect + golang.org/x/time v0.8.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/api v0.215.0 // indirect + google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect + google.golang.org/grpc v1.67.3 // indirect google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/djherbis/atime.v1 v1.0.0 // indirect + gopkg.in/djherbis/stream.v1 v1.3.1 // indirect gopkg.in/tylerb/graceful.v1 v1.2.15 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 5181ccb9..293324f7 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,45 @@ +cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= 
+cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= +cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= +cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= +cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= +cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= +cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= +cloud.google.com/go/pubsub v1.45.1 h1:ZC/UzYcrmK12THWn1P72z+Pnp2vu/zCZRXyhAfP1hJY= +cloud.google.com/go/pubsub v1.45.1/go.mod h1:3bn7fTmzZFwaUjllitv1WlsNMkqBgGUb3UdMhI54eCc= +cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw= +cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU= +cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.1 
h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f h1:zvClvFQwU++UpIUBGC8YmDlfhUrweEy1R1Fj1gu5iIM= github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -24,22 +60,53 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creachadair/jrpc2 v1.2.0 h1:SXr0OgnwM0X18P+HccJP0uT3KGSDk/BCSRlJBvE2bMY= +github.com/creachadair/jrpc2 v1.2.0/go.mod h1:66uKSdr6tR5ZeNvkIjDSbbVUtOv0UhjS/vcd8ECP7Iw= +github.com/creachadair/mds v0.13.4 h1:RgU0MhiVqkzp6/xtNWhK6Pw7tDeaVuGFtA0UA2RBYvY= +github.com/creachadair/mds v0.13.4/go.mod h1:4vrFYUzTXMJpMBU+OA292I6IUxKWCCfZkgXg+/kBZMo= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/djherbis/fscache v0.10.1 h1:hDv+RGyvD+UDKyRYuLoVNbuRTnf2SrA2K3VyR1br9lk= +github.com/djherbis/fscache v0.10.1/go.mod h1:yyPYtkNnnPXsW+81lAcQS6yab3G2CRfnPLotBvtbf0c= github.com/dlmiddlecote/sqlstats v1.0.2 h1:gSU11YN23D/iY50A2zVYwgXgy072khatTsIW6UPjUtI= github.com/dlmiddlecote/sqlstats v1.0.2/go.mod h1:0CWaIh/Th+z2aI6Q9Jpfg/o21zmGxWhbByHgQSCUQvY= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/fatih/structs v1.0.0 h1:BrX964Rv5uQ3wwS+KRUAJCBBw5PQmgJfJ6v4yly5QwU= github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsouza/fake-gcs-server v1.49.2 h1:fukDqzEQM50QkA0jAbl6cLqeDu3maQjwZBuys759TR4= +github.com/fsouza/fake-gcs-server v1.49.2/go.mod h1:17SYzJEXRcaAA5ATwwvgBkSIqIy7r1icnGM0y/y4foY= github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4= @@ -56,6 +123,11 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -72,21 +144,55 @@ github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlnd github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod 
h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 h1:oERTZ1buOUYlpmKaqlO5fYmz8cZ1rYu5DieJzF4ZVmU= github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= +github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod 
h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= github.com/guregu/null v4.0.0+incompatible h1:4zw0ckM7ECd6FNNddc3Fu4aty9nTlpkkzH7dPn4/4Gw= github.com/guregu/null v4.0.0+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -142,6 +248,8 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= @@ -150,6 +258,10 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= +github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -162,6 +274,7 @@ github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/ github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= @@ -203,19 +316,24 @@ github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= -github.com/stellar/go v0.0.0-20250417130535-10df2019148f h1:+aEOop0hlhwYuIlJhKAS5rM0OCSH1L8U+L3tM7lXcjk= -github.com/stellar/go v0.0.0-20250417130535-10df2019148f/go.mod h1:wE/ZDmjys55VprPR5qx5Ojx0cUi3f7MJ+dc5gzM+03k= +github.com/stellar/go v0.0.0-20250613214159-65b2d613a208 h1:gfTuX5bfx+HaZbA3aDJI6r9tMdA9dGeHcftOTjtXcIY= +github.com/stellar/go v0.0.0-20250613214159-65b2d613a208/go.mod h1:SbWsnxzU24xq4gh3v1f5HZ2W3EDdZkPYJOZu9TxNG7c= github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2 h1:OzCVd0SV5qE3ZcDeSFCmOWLZfEWZ3Oe8KtmSOYKEVWE= github.com/stellar/go-xdr v0.0.0-20231122183749-b53fb00bcac2/go.mod h1:yoxyU/M8nl9LKeWIoBrbDPQ7Cy+4jxRcWcOayZ4BMps= github.com/stellar/stellar-rpc v0.9.6-0.20250523162628-6bb9d7a387d5 h1:V+XezRLVHuk6c1nMkXkWjCwtoHN7F+DK86dK2kkNSZo= github.com/stellar/stellar-rpc v0.9.6-0.20250523162628-6bb9d7a387d5/go.mod h1:21zn7aUjDQZih77MDIFfsVN5Cjdiv1sMh+V51xgZwRw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -238,42 +356,129 @@ github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d h1:yJIizrfO599ot2 github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce h1:888GrqRxabUce7lj4OaoShPxodm3kXOMpSa85wdYzfY= github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce/go.mod 
h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.17.0 
h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0= +google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= +google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/djherbis/atime.v1 v1.0.0 h1:eMRqB/JrLKocla2PBPKgQYg/p5UG4L6AUAs92aP7F60= +gopkg.in/djherbis/atime.v1 v1.0.0/go.mod h1:hQIUStKmJfvf7xdh/wtK84qe+DsTV5LnA9lzxxtPpJ8= +gopkg.in/djherbis/stream.v1 v1.3.1 h1:uGfmsOY1qqMjQQphhRBSGLyA9qumJ56exkRu9ASTjCw= +gopkg.in/djherbis/stream.v1 v1.3.1/go.mod h1:aEV8CBVRmSpLamVJfM903Npic1IKmb2qS30VAZ+sssg= gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 h1:r5ptJ1tBxVAeqw4CrYWhXIMr0SybY3CDHuIbCg5CFVw= gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0/go.mod h1:WtiW9ZA1LdaWqtQRo1VbIL/v4XZ8NDta+O/kSpGgVek= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -288,3 +493,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/indexer/types/types.go b/internal/indexer/types/types.go index 16bd68e8..4991312a 100644 --- a/internal/indexer/types/types.go +++ b/internal/indexer/types/types.go @@ -6,6 +6,8 @@ import ( "encoding/json" "fmt" "time" + + "github.com/stellar/go/xdr" ) type Account struct { @@ -18,7 +20,7 @@ type Transaction struct { EnvelopeXDR string `json:"envelopeXdr,omitempty" db:"envelope_xdr"` ResultXDR string `json:"resultXdr,omitempty" db:"result_xdr"` MetaXDR string `json:"metaXdr,omitempty" db:"meta_xdr"` - LedgerNumber int64 `json:"ledgerNumber,omitempty" db:"ledger_number"` + LedgerNumber uint32 `json:"ledgerNumber,omitempty" db:"ledger_number"` LedgerCreatedAt time.Time `json:"ledgerCreatedAt,omitempty" db:"ledger_created_at"` IngestedAt time.Time `json:"ingestedAt,omitempty" db:"ingested_at"` // Relationships: @@ -29,6 +31,44 @@ type Transaction struct { type OperationType string +// xdrToOperationTypeMap provides 1:1 mapping between XDR OperationType and custom OperationType +var xdrToOperationTypeMap = 
map[xdr.OperationType]OperationType{ + xdr.OperationTypeCreateAccount: OperationTypeCreateAccount, + xdr.OperationTypePayment: OperationTypePayment, + xdr.OperationTypePathPaymentStrictReceive: OperationTypePathPaymentStrictReceive, + xdr.OperationTypeManageSellOffer: OperationTypeManageSellOffer, + xdr.OperationTypeCreatePassiveSellOffer: OperationTypeCreatePassiveSellOffer, + xdr.OperationTypeSetOptions: OperationTypeSetOptions, + xdr.OperationTypeChangeTrust: OperationTypeChangeTrust, + xdr.OperationTypeAllowTrust: OperationTypeAllowTrust, + xdr.OperationTypeAccountMerge: OperationTypeAccountMerge, + xdr.OperationTypeInflation: OperationTypeInflation, + xdr.OperationTypeManageData: OperationTypeManageData, + xdr.OperationTypeBumpSequence: OperationTypeBumpSequence, + xdr.OperationTypeManageBuyOffer: OperationTypeManageBuyOffer, + xdr.OperationTypePathPaymentStrictSend: OperationTypePathPaymentStrictSend, + xdr.OperationTypeCreateClaimableBalance: OperationTypeCreateClaimableBalance, + xdr.OperationTypeClaimClaimableBalance: OperationTypeClaimClaimableBalance, + xdr.OperationTypeBeginSponsoringFutureReserves: OperationTypeBeginSponsoringFutureReserves, + xdr.OperationTypeEndSponsoringFutureReserves: OperationTypeEndSponsoringFutureReserves, + xdr.OperationTypeRevokeSponsorship: OperationTypeRevokeSponsorship, + xdr.OperationTypeClawback: OperationTypeClawback, + xdr.OperationTypeClawbackClaimableBalance: OperationTypeClawbackClaimableBalance, + xdr.OperationTypeSetTrustLineFlags: OperationTypeSetTrustLineFlags, + xdr.OperationTypeLiquidityPoolDeposit: OperationTypeLiquidityPoolDeposit, + xdr.OperationTypeLiquidityPoolWithdraw: OperationTypeLiquidityPoolWithdraw, + xdr.OperationTypeInvokeHostFunction: OperationTypeInvokeHostFunction, + xdr.OperationTypeExtendFootprintTtl: OperationTypeExtendFootprintTTL, + xdr.OperationTypeRestoreFootprint: OperationTypeRestoreFootprint, +} + +func OperationTypeFromXDR(xdrOpType xdr.OperationType) OperationType { + if mappedType, exists := xdrToOperationTypeMap[xdrOpType]; exists { + return mappedType + } + return "" +} + const ( OperationTypeCreateAccount OperationType = "CREATE_ACCOUNT" OperationTypePayment OperationType = "PAYMENT" diff --git a/internal/ingest/participants.go b/internal/ingest/participants.go new file mode 100644 index 00000000..ca33cdd6 --- /dev/null +++ b/internal/ingest/participants.go @@ -0,0 +1,307 @@ +package ingest + +import ( + "fmt" + "sort" + "time" + + "github.com/stellar/go/ingest" + operation_processor "github.com/stellar/go/processors/operation" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + + "github.com/stellar/wallet-backend/internal/indexer/types" +) + +// ParticipantsProcessor is a processor which ingests various participants +// from different sources (transactions, operations, etc) +type ParticipantsProcessor struct { + TxsByParticipant map[Participant][]types.Transaction + OpsByParticipant map[Participant][]types.Operation + network string +} + +func NewParticipantsProcessor(network string) *ParticipantsProcessor { + return &ParticipantsProcessor{ + network: network, + TxsByParticipant: make(map[Participant][]types.Transaction), + OpsByParticipant: make(map[Participant][]types.Operation), + } +} + +func participantsForChanges(changes xdr.LedgerEntryChanges) ([]xdr.AccountId, error) { + var participants []xdr.AccountId + + for _, c := range changes { + var participant *xdr.AccountId + + switch c.Type { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + participant = 
participantsForLedgerEntry(c.MustCreated()) + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + participant = participantsForLedgerKey(c.MustRemoved()) + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + participant = participantsForLedgerEntry(c.MustUpdated()) + case xdr.LedgerEntryChangeTypeLedgerEntryState: + participant = participantsForLedgerEntry(c.MustState()) + default: + return nil, errors.Errorf("Unknown change type: %s", c.Type) + } + + if participant != nil { + participants = append(participants, *participant) + } + } + + return participants, nil +} + +func participantsForLedgerEntry(le xdr.LedgerEntry) *xdr.AccountId { + if le.Data.Type != xdr.LedgerEntryTypeAccount { + return nil + } + aid := le.Data.MustAccount().AccountId + return &aid +} + +func participantsForLedgerKey(lk xdr.LedgerKey) *xdr.AccountId { + if lk.Type != xdr.LedgerEntryTypeAccount { + return nil + } + aid := lk.MustAccount().AccountId + return &aid +} + +func participantsForMeta(meta xdr.TransactionMeta) ([]xdr.AccountId, error) { + var participants []xdr.AccountId + if meta.Operations == nil { + return participants, nil + } + + for _, op := range *meta.Operations { + var accounts []xdr.AccountId + accounts, err := participantsForChanges(op.Changes) + if err != nil { + return nil, err + } + + participants = append(participants, accounts...) + } + + return participants, nil +} + +func (p *ParticipantsProcessor) addTransactionParticipants( + sequence uint32, + transaction ingest.LedgerTransaction, +) error { + if len(p.OpsByParticipant) == 0 { + err := p.addOperationsParticipants(sequence, transaction) + if err != nil { + return fmt.Errorf("could not determine operation participants: %w", err) + } + } + + txsByParticipant, err := participantsForTransaction(sequence, transaction, p.OpsByParticipant) + if err != nil { + return fmt.Errorf("could not determine participants for transaction: %w", err) + } + p.TxsByParticipant = txsByParticipant + + return nil +} + +func (p *ParticipantsProcessor) addOperationsParticipants( + sequence uint32, + transaction ingest.LedgerTransaction, +) error { + participants, err := operationsParticipants(transaction, sequence, p.network) + if err != nil { + return errors.Wrap(err, "could not determine operation participants") + } + + p.OpsByParticipant = participants + + return nil +} + +type Participant string + +// operationsParticipants returns a map with all participants per operation. 
+func operationsParticipants(transaction ingest.LedgerTransaction, sequence uint32, network string) (allOpsParticipants map[Participant][]types.Operation, err error) { + now := time.Now() + ledgerCreatedAt := transaction.Ledger.ClosedAt() + txHash := transaction.Hash.HexString() + + for opi, xdrOp := range transaction.Envelope.Operations() { + op := operation_processor.TransactionOperationWrapper{ + Index: uint32(opi), + Transaction: transaction, + Operation: xdrOp, + LedgerSequence: sequence, + Network: network, + } + opID := op.ID() + + p, err := op.Participants() + if err != nil { + return allOpsParticipants, errors.Wrapf(err, "reading operation %v participants", opID) + } + + xdrOpStr, err := xdr.MarshalBase64(xdrOp) + if err != nil { + return allOpsParticipants, fmt.Errorf("marshalling operation %v: %w", opID, err) + } + dbOp := types.Operation{ + ID: string(opID), + OperationType: types.OperationTypeFromXDR(op.OperationType()), + OperationXDR: xdrOpStr, + LedgerCreatedAt: ledgerCreatedAt, + IngestedAt: now, + TxHash: txHash, + } + + for _, xdrParticipant := range dedupeParticipants(p) { + participant := Participant(xdrParticipant.Address()) + allOpsParticipants[participant] = append(allOpsParticipants[participant], dbOp) + } + } + + return allOpsParticipants, nil +} + +func (p *ParticipantsProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { + if err := p.addOperationsParticipants(lcm.LedgerSequence(), transaction); err != nil { + return err + } + + if err := p.addTransactionParticipants(lcm.LedgerSequence(), transaction); err != nil { + return err + } + + return nil +} + +func directParticipantsForTransaction(transaction ingest.LedgerTransaction) (participantStrs []Participant, err error) { + participants := []xdr.AccountId{ + transaction.Envelope.SourceAccount().ToAccountId(), + } + if transaction.Envelope.IsFeeBump() { + participants = append(participants, transaction.Envelope.FeeBumpAccount().ToAccountId()) + } + + p, err := participantsForMeta(transaction.UnsafeMeta) + if err != nil { + return nil, fmt.Errorf("identifying participants for meta: %w", err) + } + participants = append(participants, p...) + + p, err = participantsForChanges(transaction.FeeChanges) + if err != nil { + return nil, fmt.Errorf("identifying participants for changes: %w", err) + } + participants = append(participants, p...) + participants = dedupeParticipants(participants) + + participantStrs = make([]Participant, len(participants)) + for _, p := range participants { + participantStrs = append(participantStrs, Participant(p.Address())) + } + + return participantStrs, nil +} + +func participantsForTransaction( + sequence uint32, + transaction ingest.LedgerTransaction, + opsByParticipant map[Participant][]types.Operation, +) (txsByParticipant map[Participant][]types.Transaction, err error) { + txDirectParticipants, err := directParticipantsForTransaction(transaction) + if err != nil { + return nil, fmt.Errorf("could not determine participants for transaction: %w", err) + } + + opsParticipants := make([]Participant, 0, len(opsByParticipant)) + for k := range opsByParticipant { + opsParticipants = append(opsParticipants, k) + } + + participants := make([]Participant, 0, len(txDirectParticipants)+len(opsParticipants)) + participants = append(participants, txDirectParticipants...) + participants = append(participants, opsParticipants...) 
+ participants = DedupeComparable(participants) + + envelopeXDR, err := xdr.MarshalBase64(transaction.Envelope) + if err != nil { + return nil, errors.Wrapf(err, "marshalling transaction envelope") + } + + resultXDR, err := xdr.MarshalBase64(transaction.Result) + if err != nil { + return nil, errors.Wrapf(err, "marshalling transaction result") + } + + metaXDR, err := xdr.MarshalBase64(transaction.UnsafeMeta) + if err != nil { + return nil, errors.Wrapf(err, "marshalling transaction meta") + } + + now := time.Now() + txDB := types.Transaction{ + Hash: transaction.Hash.HexString(), + LedgerCreatedAt: transaction.Ledger.ClosedAt(), + IngestedAt: now, + EnvelopeXDR: envelopeXDR, + ResultXDR: resultXDR, + MetaXDR: metaXDR, + LedgerNumber: transaction.LedgerVersion, + } + for _, participant := range participants { + txsByParticipant[participant] = append(txsByParticipant[participant], txDB) + } + + return txsByParticipant, nil +} + +// dedupeParticipants remove any duplicate ids from `in` +func dedupeParticipants(in []xdr.AccountId) []xdr.AccountId { + if len(in) <= 1 { + return in + } + sort.Slice(in, func(i, j int) bool { + return in[i].Address() < in[j].Address() + }) + insert := 1 + for cur := 1; cur < len(in); cur++ { + if in[cur].Equals(in[cur-1]) { + continue + } + if insert != cur { + in[insert] = in[cur] + } + insert++ + } + return in[:insert] +} + +// Dedupe removes duplicate elements from a slice while preserving order. +// It works with any comparable type. +func DedupeComparable[T comparable](slice []T) []T { + if len(slice) == 0 { + return slice + } + + // Create a map to track seen elements + seen := make(map[T]struct{}, len(slice)) + result := make([]T, 0, len(slice)) + + // Iterate through the slice and only add elements we haven't seen before + for _, item := range slice { + if _, exists := seen[item]; !exists { + seen[item] = struct{}{} + result = append(result, item) + } + } + + return result +} From 29e83b268c3bf58bd10230c39f45fd7f68542403 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 10:53:48 -0700 Subject: [PATCH 15/27] Start tracking participants of a ledger transaction --- internal/ingest/data_bundle.go | 88 +++++++++++++ internal/ingest/participants.go | 224 +++++++++----------------------- 2 files changed, 147 insertions(+), 165 deletions(-) create mode 100644 internal/ingest/data_bundle.go diff --git a/internal/ingest/data_bundle.go b/internal/ingest/data_bundle.go new file mode 100644 index 00000000..6ce7310a --- /dev/null +++ b/internal/ingest/data_bundle.go @@ -0,0 +1,88 @@ +package ingest + +import "github.com/stellar/wallet-backend/internal/indexer/types" + +type Participant string + +// Set is a set of strings that enforces uniqueness +type Set[T comparable] map[T]struct{} + +// Add adds a value to the set +func (s Set[T]) Add(value T) { + if s == nil { + s = make(Set[T]) + } + s[value] = struct{}{} +} + +// Contains checks if a value exists in the set +func (s Set[T]) Contains(value T) bool { + _, exists := s[value] + return exists +} + +// ToSlice converts the set to a slice +func (s Set[T]) ToSlice() []T { + result := make([]T, 0, len(s)) + for k := range s { + result = append(result, k) + } + return result +} + +type DataBundle struct { + Network string + Participants Set[Participant] + OpByID map[string]types.Operation + TxByHash map[string]types.Transaction + TxHashesByParticipant map[Participant]Set[string] + OpIDsByParticipant map[Participant]Set[string] +} + +func NewDataBundle(network string) DataBundle { + return DataBundle{ + 
Network: network, + Participants: Set[Participant]{}, + OpByID: map[string]types.Operation{}, + TxByHash: map[string]types.Transaction{}, + TxHashesByParticipant: map[Participant]Set[string]{}, + OpIDsByParticipant: map[Participant]Set[string]{}, + } +} + +func (b *DataBundle) PushTransactionWithParticipant(participant Participant, transaction types.Transaction) { + b.TxByHash[transaction.Hash] = transaction + b.TxHashesByParticipant[participant].Add(transaction.Hash) + b.Participants.Add(participant) +} + +func (b *DataBundle) PushOperationWithParticipant(participant Participant, operation types.Operation) { + b.OpByID[operation.ID] = operation + b.OpIDsByParticipant[participant].Add(operation.ID) + b.TxHashesByParticipant[participant].Add(operation.TxHash) + b.Participants.Add(participant) +} + +func (b *DataBundle) GetParticipantOperationIDs(participant Participant) Set[string] { + return b.OpIDsByParticipant[participant] +} + +func (b *DataBundle) GetParticipantOperations(participant Participant) []types.Operation { + ops := []types.Operation{} + for _, opID := range b.OpIDsByParticipant[participant].ToSlice() { + ops = append(ops, b.OpByID[opID]) + } + return ops +} + +func (b *DataBundle) GetParticipantTransactionHashes(participant Participant) Set[string] { + return b.TxHashesByParticipant[participant] +} + +func (b *DataBundle) GetParticipantTransactions(participant Participant) []types.Transaction { + txs := []types.Transaction{} + for _, txHash := range b.TxHashesByParticipant[participant].ToSlice() { + txs = append(txs, b.TxByHash[txHash]) + } + return txs +} diff --git a/internal/ingest/participants.go b/internal/ingest/participants.go index ca33cdd6..3ba84a76 100644 --- a/internal/ingest/participants.go +++ b/internal/ingest/participants.go @@ -2,7 +2,6 @@ package ingest import ( "fmt" - "sort" "time" "github.com/stellar/go/ingest" @@ -16,16 +15,14 @@ import ( // ParticipantsProcessor is a processor which ingests various participants // from different sources (transactions, operations, etc) type ParticipantsProcessor struct { - TxsByParticipant map[Participant][]types.Transaction - OpsByParticipant map[Participant][]types.Operation - network string + network string + dataBundle DataBundle } func NewParticipantsProcessor(network string) *ParticipantsProcessor { return &ParticipantsProcessor{ - network: network, - TxsByParticipant: make(map[Participant][]types.Transaction), - OpsByParticipant: make(map[Participant][]types.Operation), + network: network, + dataBundle: NewDataBundle(network), } } @@ -91,66 +88,87 @@ func participantsForMeta(meta xdr.TransactionMeta) ([]xdr.AccountId, error) { return participants, nil } -func (p *ParticipantsProcessor) addTransactionParticipants( - sequence uint32, - transaction ingest.LedgerTransaction, -) error { - if len(p.OpsByParticipant) == 0 { - err := p.addOperationsParticipants(sequence, transaction) - if err != nil { - return fmt.Errorf("could not determine operation participants: %w", err) - } +func (p *ParticipantsProcessor) addTransactionParticipants(transaction ingest.LedgerTransaction) error { + // 1. 
Get direct participants involved in the transaction + participants := []xdr.AccountId{ + transaction.Envelope.SourceAccount().ToAccountId(), + } + if transaction.Envelope.IsFeeBump() { + participants = append(participants, transaction.Envelope.FeeBumpAccount().ToAccountId()) } - txsByParticipant, err := participantsForTransaction(sequence, transaction, p.OpsByParticipant) + metaParticipants, err := participantsForMeta(transaction.UnsafeMeta) if err != nil { - return fmt.Errorf("could not determine participants for transaction: %w", err) + return fmt.Errorf("identifying participants for meta: %w", err) } - p.TxsByParticipant = txsByParticipant + participants = append(participants, metaParticipants...) - return nil -} + feeParticipants, err := participantsForChanges(transaction.FeeChanges) + if err != nil { + return fmt.Errorf("identifying participants for changes: %w", err) + } + participants = append(participants, feeParticipants...) -func (p *ParticipantsProcessor) addOperationsParticipants( - sequence uint32, - transaction ingest.LedgerTransaction, -) error { - participants, err := operationsParticipants(transaction, sequence, p.network) + // 2. Build database transaction object + envelopeXDR, err := xdr.MarshalBase64(transaction.Envelope) if err != nil { - return errors.Wrap(err, "could not determine operation participants") + return fmt.Errorf("marshalling transaction envelope: %w", err) + } + + resultXDR, err := xdr.MarshalBase64(transaction.Result) + if err != nil { + return fmt.Errorf("marshalling transaction result: %w", err) + } + + metaXDR, err := xdr.MarshalBase64(transaction.UnsafeMeta) + if err != nil { + return fmt.Errorf("marshalling transaction meta: %w", err) + } + + dbTx := types.Transaction{ + Hash: transaction.Hash.HexString(), + LedgerCreatedAt: transaction.Ledger.ClosedAt(), + IngestedAt: time.Now(), + EnvelopeXDR: envelopeXDR, + ResultXDR: resultXDR, + MetaXDR: metaXDR, + LedgerNumber: transaction.LedgerVersion, } - p.OpsByParticipant = participants + // 3. Push transaction and participants to data bundle + for _, xdrParticipant := range participants { + p.dataBundle.PushTransactionWithParticipant(Participant(xdrParticipant.Address()), dbTx) + } return nil } -type Participant string - -// operationsParticipants returns a map with all participants per operation. -func operationsParticipants(transaction ingest.LedgerTransaction, sequence uint32, network string) (allOpsParticipants map[Participant][]types.Operation, err error) { +func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, transaction ingest.LedgerTransaction) error { now := time.Now() ledgerCreatedAt := transaction.Ledger.ClosedAt() txHash := transaction.Hash.HexString() for opi, xdrOp := range transaction.Envelope.Operations() { + // 1. Build op wrapper, so we can use its methods op := operation_processor.TransactionOperationWrapper{ Index: uint32(opi), Transaction: transaction, Operation: xdrOp, LedgerSequence: sequence, - Network: network, + Network: p.network, } opID := op.ID() - p, err := op.Participants() + // 2. Get participants for the operation + participants, err := op.Participants() if err != nil { - return allOpsParticipants, errors.Wrapf(err, "reading operation %v participants", opID) + return fmt.Errorf("reading operation %v participants: %w", opID, err) } + // 3. 
Build database operation object xdrOpStr, err := xdr.MarshalBase64(xdrOp) if err != nil { - return allOpsParticipants, fmt.Errorf("marshalling operation %v: %w", opID, err) + return fmt.Errorf("marshalling operation %v: %w", opID, err) } dbOp := types.Operation{ ID: string(opID), @@ -161,13 +179,13 @@ func operationsParticipants(transaction ingest.LedgerTransaction, sequence uint3 TxHash: txHash, } - for _, xdrParticipant := range dedupeParticipants(p) { - participant := Participant(xdrParticipant.Address()) - allOpsParticipants[participant] = append(allOpsParticipants[participant], dbOp) + // 4. Push operation and participants to data bundle + for _, xdrParticipant := range participants { + p.dataBundle.PushOperationWithParticipant(Participant(xdrParticipant.Address()), dbOp) } } - return allOpsParticipants, nil + return nil } func (p *ParticipantsProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { @@ -175,133 +193,9 @@ func (p *ParticipantsProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, tran return err } - if err := p.addTransactionParticipants(lcm.LedgerSequence(), transaction); err != nil { + if err := p.addTransactionParticipants(transaction); err != nil { return err } return nil } - -func directParticipantsForTransaction(transaction ingest.LedgerTransaction) (participantStrs []Participant, err error) { - participants := []xdr.AccountId{ - transaction.Envelope.SourceAccount().ToAccountId(), - } - if transaction.Envelope.IsFeeBump() { - participants = append(participants, transaction.Envelope.FeeBumpAccount().ToAccountId()) - } - - p, err := participantsForMeta(transaction.UnsafeMeta) - if err != nil { - return nil, fmt.Errorf("identifying participants for meta: %w", err) - } - participants = append(participants, p...) - - p, err = participantsForChanges(transaction.FeeChanges) - if err != nil { - return nil, fmt.Errorf("identifying participants for changes: %w", err) - } - participants = append(participants, p...) - participants = dedupeParticipants(participants) - - participantStrs = make([]Participant, len(participants)) - for _, p := range participants { - participantStrs = append(participantStrs, Participant(p.Address())) - } - - return participantStrs, nil -} - -func participantsForTransaction( - sequence uint32, - transaction ingest.LedgerTransaction, - opsByParticipant map[Participant][]types.Operation, -) (txsByParticipant map[Participant][]types.Transaction, err error) { - txDirectParticipants, err := directParticipantsForTransaction(transaction) - if err != nil { - return nil, fmt.Errorf("could not determine participants for transaction: %w", err) - } - - opsParticipants := make([]Participant, 0, len(opsByParticipant)) - for k := range opsByParticipant { - opsParticipants = append(opsParticipants, k) - } - - participants := make([]Participant, 0, len(txDirectParticipants)+len(opsParticipants)) - participants = append(participants, txDirectParticipants...) - participants = append(participants, opsParticipants...) 
- participants = DedupeComparable(participants) - - envelopeXDR, err := xdr.MarshalBase64(transaction.Envelope) - if err != nil { - return nil, errors.Wrapf(err, "marshalling transaction envelope") - } - - resultXDR, err := xdr.MarshalBase64(transaction.Result) - if err != nil { - return nil, errors.Wrapf(err, "marshalling transaction result") - } - - metaXDR, err := xdr.MarshalBase64(transaction.UnsafeMeta) - if err != nil { - return nil, errors.Wrapf(err, "marshalling transaction meta") - } - - now := time.Now() - txDB := types.Transaction{ - Hash: transaction.Hash.HexString(), - LedgerCreatedAt: transaction.Ledger.ClosedAt(), - IngestedAt: now, - EnvelopeXDR: envelopeXDR, - ResultXDR: resultXDR, - MetaXDR: metaXDR, - LedgerNumber: transaction.LedgerVersion, - } - for _, participant := range participants { - txsByParticipant[participant] = append(txsByParticipant[participant], txDB) - } - - return txsByParticipant, nil -} - -// dedupeParticipants remove any duplicate ids from `in` -func dedupeParticipants(in []xdr.AccountId) []xdr.AccountId { - if len(in) <= 1 { - return in - } - sort.Slice(in, func(i, j int) bool { - return in[i].Address() < in[j].Address() - }) - insert := 1 - for cur := 1; cur < len(in); cur++ { - if in[cur].Equals(in[cur-1]) { - continue - } - if insert != cur { - in[insert] = in[cur] - } - insert++ - } - return in[:insert] -} - -// Dedupe removes duplicate elements from a slice while preserving order. -// It works with any comparable type. -func DedupeComparable[T comparable](slice []T) []T { - if len(slice) == 0 { - return slice - } - - // Create a map to track seen elements - seen := make(map[T]struct{}, len(slice)) - result := make([]T, 0, len(slice)) - - // Iterate through the slice and only add elements we haven't seen before - for _, item := range slice { - if _, exists := seen[item]; !exists { - seen[item] = struct{}{} - result = append(result, item) - } - } - - return result -} From 69b1e0b9cfd3690c3f6d26be4889b9e66308bd52 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 11:37:40 -0700 Subject: [PATCH 16/27] move new files to a different folder, to avoid dependency cycle. 
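The processor and DataBundle move out of package ingest so that the service layer can import them without creating an import loop with the ingest code. A minimal sketch of the import direction this move enables (the paths are the ones used by later patches in this series; this is only an illustration of the intended layering, not the compiler-reported cycle):

    // internal/services/ingest.go -- consumer side (illustrative excerpt)
    import (
        "github.com/stellar/wallet-backend/internal/indexer"       // ParticipantsProcessor, DataBundle
        "github.com/stellar/wallet-backend/internal/indexer/types" // shared row types: types.Transaction, types.Operation
    )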
--- internal/{ingest => indexer}/data_bundle.go | 2 +- internal/{ingest => indexer}/participants.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) rename internal/{ingest => indexer}/data_bundle.go (99%) rename internal/{ingest => indexer}/participants.go (97%) diff --git a/internal/ingest/data_bundle.go b/internal/indexer/data_bundle.go similarity index 99% rename from internal/ingest/data_bundle.go rename to internal/indexer/data_bundle.go index 6ce7310a..1cd66565 100644 --- a/internal/ingest/data_bundle.go +++ b/internal/indexer/data_bundle.go @@ -1,4 +1,4 @@ -package ingest +package indexer import "github.com/stellar/wallet-backend/internal/indexer/types" diff --git a/internal/ingest/participants.go b/internal/indexer/participants.go similarity index 97% rename from internal/ingest/participants.go rename to internal/indexer/participants.go index 3ba84a76..c0201829 100644 --- a/internal/ingest/participants.go +++ b/internal/indexer/participants.go @@ -1,4 +1,4 @@ -package ingest +package indexer import ( "fmt" @@ -188,7 +188,7 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans return nil } -func (p *ParticipantsProcessor) ProcessTransaction(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { +func (p *ParticipantsProcessor) ProcessTransactionData(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { if err := p.addOperationsParticipants(lcm.LedgerSequence(), transaction); err != nil { return err } From 824dc64e0a0353ab6e276c30fdd6a4144a4921d5 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 11:38:46 -0700 Subject: [PATCH 17/27] Create data bundles that include transactions and operations. --- internal/services/ingest.go | 78 ++++++++++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 14 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 85929bc8..c9d8bfb5 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "io" "os" "os/signal" "syscall" "time" "github.com/alitto/pond" + "github.com/stellar/go/ingest" "github.com/stellar/go/support/log" "github.com/stellar/go/txnbuild" "github.com/stellar/go/xdr" @@ -19,6 +21,7 @@ import ( "github.com/stellar/wallet-backend/internal/data" "github.com/stellar/wallet-backend/internal/db" "github.com/stellar/wallet-backend/internal/entities" + "github.com/stellar/wallet-backend/internal/indexer" "github.com/stellar/wallet-backend/internal/metrics" "github.com/stellar/wallet-backend/internal/signing/store" txutils "github.com/stellar/wallet-backend/internal/transactions/utils" @@ -44,12 +47,13 @@ type IngestService interface { var _ IngestService = (*ingestService)(nil) type ingestService struct { - models *data.Models - ledgerCursorName string - appTracker apptracker.AppTracker - rpcService RPCService - chAccStore store.ChannelAccountStore - metricsService metrics.MetricsService + models *data.Models + ledgerCursorName string + appTracker apptracker.AppTracker + rpcService RPCService + chAccStore store.ChannelAccountStore + metricsService metrics.MetricsService + networkPassphrase string } func NewIngestService( @@ -80,12 +84,13 @@ func NewIngestService( } return &ingestService{ - models: models, - ledgerCursorName: ledgerCursorName, - appTracker: appTracker, - rpcService: rpcService, - chAccStore: chAccStore, - metricsService: metricsService, + models: models, + ledgerCursorName: ledgerCursorName, + appTracker: appTracker, + 
rpcService: rpcService, + chAccStore: chAccStore, + metricsService: metricsService, + networkPassphrase: rpcService.NetworkPassphrase(), }, nil } @@ -235,10 +240,55 @@ func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersRes } func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.LedgerInfo) (any, error) { - log.Ctx(ctx).Warnf("🚧 TODO: process ledger %d", ledgerInfo.Sequence) + var xdrLedgerCloseMeta xdr.LedgerCloseMeta + if err := xdr.SafeUnmarshalBase64(ledgerInfo.LedgerMetadata, &xdrLedgerCloseMeta); err != nil { + return nil, fmt.Errorf("unmarshalling ledger close meta: %w", err) + } + + transactions, err := m.getLedgerTransactions(ctx, xdrLedgerCloseMeta) + if err != nil { + return nil, fmt.Errorf("getting ledger transactions: %w", err) + } + + dataBundle := indexer.NewParticipantsProcessor(m.networkPassphrase) + for _, tx := range transactions { + err := dataBundle.ProcessTransactionData(xdrLedgerCloseMeta, tx) + if err != nil { + return nil, fmt.Errorf("processing transaction: %w", err) + } + } + return nil, nil } +func (m *ingestService) getLedgerTransactions(_ context.Context, xdrLedgerCloseMeta xdr.LedgerCloseMeta) ([]ingest.LedgerTransaction, error) { + ledgerTxReader, err := ingest.NewLedgerTransactionReaderFromLedgerCloseMeta(m.networkPassphrase, xdrLedgerCloseMeta) + if err != nil { + return nil, fmt.Errorf("creating ledger transaction reader: %w", err) + } + defer ledgerTxReader.Close() + + transactions := make([]ingest.LedgerTransaction, 0) + for { + tx, err := ledgerTxReader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, fmt.Errorf("reading ledger: %w", err) + } + + if !tx.Successful() { + // TODO: understand what we're indexing for unsuccessful transactions + continue + } + + transactions = append(transactions, tx) + } + + return transactions, nil +} + // fetchNextLedgersBatch fetches the next batch of ledgers from the RPC service. func (m *ingestService) fetchNextLedgersBatch(ctx context.Context, startLedger uint32) (GetLedgersResponse, error) { rpcHealth, err := m.rpcService.GetHealth() @@ -474,7 +524,7 @@ func (m *ingestService) extractInnerTxHash(txXDR string) (string, error) { } } - innerTxHash, err := innerTx.HashHex(m.rpcService.NetworkPassphrase()) + innerTxHash, err := innerTx.HashHex(m.networkPassphrase) if err != nil { return "", fmt.Errorf("generating hash hex: %w", err) } From 6e2b71926dd395e1911301840e299878cccff3c7 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 12:08:05 -0700 Subject: [PATCH 18/27] Make sure unsuccessful transactions are still processed, but their operations are skipped --- internal/indexer/participants.go | 20 ++++++++++++-------- internal/services/ingest.go | 27 ++++++++++----------------- 2 files changed, 22 insertions(+), 25 deletions(-) diff --git a/internal/indexer/participants.go b/internal/indexer/participants.go index c0201829..6baf3b28 100644 --- a/internal/indexer/participants.go +++ b/internal/indexer/participants.go @@ -16,13 +16,13 @@ import ( // from different sources (transactions, operations, etc) type ParticipantsProcessor struct { network string - dataBundle DataBundle + DataBundle DataBundle } func NewParticipantsProcessor(network string) *ParticipantsProcessor { return &ParticipantsProcessor{ network: network, - dataBundle: NewDataBundle(network), + DataBundle: NewDataBundle(network), } } @@ -137,13 +137,17 @@ func (p *ParticipantsProcessor) addTransactionParticipants(transaction ingest.Le // 3. 
Push transaction and participants to data bundle for _, xdrParticipant := range participants { - p.dataBundle.PushTransactionWithParticipant(Participant(xdrParticipant.Address()), dbTx) + p.DataBundle.PushTransactionWithParticipant(Participant(xdrParticipant.Address()), dbTx) } return nil } func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, transaction ingest.LedgerTransaction) error { + if !transaction.Successful() { + return nil + } + now := time.Now() ledgerCreatedAt := transaction.Ledger.ClosedAt() txHash := transaction.Hash.HexString() @@ -157,21 +161,21 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans LedgerSequence: sequence, Network: p.network, } - opID := op.ID() + opID := fmt.Sprintf("%d", op.ID()) // 2. Get participants for the operation participants, err := op.Participants() if err != nil { - return fmt.Errorf("reading operation %v participants: %w", opID, err) + return fmt.Errorf("reading operation %s participants: %w", opID, err) } // 3. Build database operation object xdrOpStr, err := xdr.MarshalBase64(xdrOp) if err != nil { - return fmt.Errorf("marshalling operation %v: %w", opID, err) + return fmt.Errorf("marshalling operation %s: %w", opID, err) } dbOp := types.Operation{ - ID: string(opID), + ID: opID, OperationType: types.OperationTypeFromXDR(op.OperationType()), OperationXDR: xdrOpStr, LedgerCreatedAt: ledgerCreatedAt, @@ -181,7 +185,7 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans // 4. Push operation and participants to data bundle for _, xdrParticipant := range participants { - p.dataBundle.PushOperationWithParticipant(Participant(xdrParticipant.Address()), dbOp) + p.DataBundle.PushOperationWithParticipant(Participant(xdrParticipant.Address()), dbOp) } } diff --git a/internal/services/ingest.go b/internal/services/ingest.go index c9d8bfb5..6fab2bdd 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -198,10 +198,10 @@ type result[T any] struct { } func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersResponse GetLedgersResponse) error { - log.Ctx(ctx).Warnf("🚧 TODO: process & ingest ledger response") + log.Ctx(ctx).Infof("🚧 Will process & ingest %d ledgers", len(getLedgersResponse.Ledgers)) // Create a worker pool with - const poolSize = 4 + const poolSize = 32 pool := pond.New(poolSize, maxLedgerWindow, pond.Context(ctx)) // Create a slice to store results @@ -232,33 +232,31 @@ func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersRes return fmt.Errorf("processing ledgers: %w", errors.Join(errs...)) } - for _, result := range results { - log.Ctx(ctx).Debugf("Processed ledger %d", result.ledger.Sequence) - } + log.Ctx(ctx).Infof("🚧 Done processing & ingesting %d ledgers", len(getLedgersResponse.Ledgers)) return nil } -func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.LedgerInfo) (any, error) { +func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.LedgerInfo) (indexer.DataBundle, error) { var xdrLedgerCloseMeta xdr.LedgerCloseMeta if err := xdr.SafeUnmarshalBase64(ledgerInfo.LedgerMetadata, &xdrLedgerCloseMeta); err != nil { - return nil, fmt.Errorf("unmarshalling ledger close meta: %w", err) + return indexer.DataBundle{}, fmt.Errorf("unmarshalling ledger close meta: %w", err) } transactions, err := m.getLedgerTransactions(ctx, xdrLedgerCloseMeta) if err != nil { - return nil, fmt.Errorf("getting ledger transactions: %w", err) + return 
indexer.DataBundle{}, fmt.Errorf("getting ledger transactions: %w", err) } - dataBundle := indexer.NewParticipantsProcessor(m.networkPassphrase) + participantsProcessor := indexer.NewParticipantsProcessor(m.networkPassphrase) for _, tx := range transactions { - err := dataBundle.ProcessTransactionData(xdrLedgerCloseMeta, tx) + err := participantsProcessor.ProcessTransactionData(xdrLedgerCloseMeta, tx) if err != nil { - return nil, fmt.Errorf("processing transaction: %w", err) + return indexer.DataBundle{}, fmt.Errorf("processing transaction: %w", err) } } - return nil, nil + return participantsProcessor.DataBundle, nil } func (m *ingestService) getLedgerTransactions(_ context.Context, xdrLedgerCloseMeta xdr.LedgerCloseMeta) ([]ingest.LedgerTransaction, error) { @@ -278,11 +276,6 @@ func (m *ingestService) getLedgerTransactions(_ context.Context, xdrLedgerCloseM return nil, fmt.Errorf("reading ledger: %w", err) } - if !tx.Successful() { - // TODO: understand what we're indexing for unsuccessful transactions - continue - } - transactions = append(transactions, tx) } From fd83a4a686f993951228301c1249bf585d51686b Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 13:30:24 -0700 Subject: [PATCH 19/27] Implement AccountModel.GetExisting --- internal/data/accounts.go | 22 ++++++++ internal/data/accounts_test.go | 97 ++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+) diff --git a/internal/data/accounts.go b/internal/data/accounts.go index 299494e1..09a4c9af 100644 --- a/internal/data/accounts.go +++ b/internal/data/accounts.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + "github.com/lib/pq" + "github.com/stellar/wallet-backend/internal/db" "github.com/stellar/wallet-backend/internal/metrics" ) @@ -41,6 +43,26 @@ func (m *AccountModel) Delete(ctx context.Context, address string) error { return nil } +// GetExisting returns only the addresses from the input list that exist in the database. +func (m *AccountModel) GetExisting(ctx context.Context, dbTx db.Transaction, stellarAddresses []string) ([]string, error) { + var sqlExecuter db.SQLExecuter = dbTx + if sqlExecuter == nil { + sqlExecuter = m.DB + } + + const query = "SELECT stellar_address FROM accounts WHERE stellar_address = ANY($1)" + var existingAddresses []string + start := time.Now() + err := sqlExecuter.SelectContext(ctx, &existingAddresses, query, pq.Array(stellarAddresses)) + duration := time.Since(start).Seconds() + m.MetricsService.ObserveDBQueryDuration("SELECT", "accounts", duration) + if err != nil { + return nil, fmt.Errorf("getting existing addresses: %w", err) + } + m.MetricsService.IncDBQuery("SELECT", "accounts") + return existingAddresses, nil +} + // IsAccountFeeBumpEligible checks whether an account is eligible to have its transaction fee-bumped. Channel Accounts should be // eligible because some of the transactions will have the channel accounts as the source account (i. e. create account sponsorship). 
func (m *AccountModel) IsAccountFeeBumpEligible(ctx context.Context, address string) (bool, error) { diff --git a/internal/data/accounts_test.go b/internal/data/accounts_test.go index ed370590..d75fc11b 100644 --- a/internal/data/accounts_test.go +++ b/internal/data/accounts_test.go @@ -5,6 +5,7 @@ import ( "database/sql" "testing" + "github.com/lib/pq" "github.com/stellar/go/keypair" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -78,6 +79,102 @@ func TestAccountModelDelete(t *testing.T) { assert.ErrorIs(t, err, sql.ErrNoRows) } +func Test_AccountModel_GetExisting(t *testing.T) { + dbt := dbtest.Open(t) + defer dbt.Close() + dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool.Close() + + ctx := context.Background() + existingAddress1 := keypair.MustRandom().Address() + existingAddress2 := keypair.MustRandom().Address() + existingAddresses := []string{existingAddress1, existingAddress2} + nonExistingAddress := keypair.MustRandom().Address() + + // Insert addresses to test against + _, err = dbConnectionPool.ExecContext(ctx, "INSERT INTO accounts (stellar_address) SELECT unnest($1::text[])", pq.Array(existingAddresses)) + require.NoError(t, err) + + testCases := []struct { + name string + inputValues []string + useDbTx bool + wantResults []string + wantErrContains string + }{ + { + name: "🟢all_addresses_found", + inputValues: existingAddresses, + useDbTx: false, + wantResults: existingAddresses, + wantErrContains: "", + }, + { + name: "🟢all_addresses_found_with_db_transaction", + inputValues: existingAddresses, + useDbTx: true, + wantResults: existingAddresses, + wantErrContains: "", + }, + { + name: "🟢no_addresses_found", + inputValues: []string{nonExistingAddress}, + useDbTx: false, + wantResults: nil, + wantErrContains: "", + }, + { + name: "🟢mixed_addresses", + inputValues: []string{existingAddress1, existingAddress2, nonExistingAddress}, + useDbTx: false, + wantResults: []string{existingAddress1, existingAddress2}, + wantErrContains: "", + }, + { + name: "🟢empty_input", + inputValues: []string{}, + useDbTx: false, + wantResults: nil, + wantErrContains: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create fresh mock for each test case + mockMetricsService := metrics.NewMockMetricsService() + mockMetricsService. + On("ObserveDBQueryDuration", "SELECT", "accounts", mock.Anything).Return(). 
+ On("IncDBQuery", "SELECT", "accounts").Return() + defer mockMetricsService.AssertExpectations(t) + + m := &AccountModel{ + DB: dbConnectionPool, + MetricsService: mockMetricsService, + } + + var dbTx db.Transaction + if tc.useDbTx { + dbTx, err = dbConnectionPool.BeginTxx(ctx, nil) + require.NoError(t, err) + defer dbTx.Rollback() + } + + got, err := m.GetExisting(ctx, dbTx, tc.inputValues) + + if tc.wantErrContains != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErrContains) + assert.Nil(t, got) + } else { + require.NoError(t, err) + assert.Equal(t, tc.wantResults, got) + } + }) + } +} + func TestAccountModelIsAccountFeeBumpEligible(t *testing.T) { dbt := dbtest.Open(t) defer dbt.Close() From 844530e46d77a0fe062007045d11ecf81d71eebb Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 14:57:26 -0700 Subject: [PATCH 20/27] Change Select to SELECT FOR UPDATE --- internal/data/accounts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/data/accounts.go b/internal/data/accounts.go index 09a4c9af..9610065e 100644 --- a/internal/data/accounts.go +++ b/internal/data/accounts.go @@ -50,7 +50,7 @@ func (m *AccountModel) GetExisting(ctx context.Context, dbTx db.Transaction, ste sqlExecuter = m.DB } - const query = "SELECT stellar_address FROM accounts WHERE stellar_address = ANY($1)" + const query = "SELECT stellar_address FROM accounts WHERE stellar_address = ANY($1) FOR UPDATE" var existingAddresses []string start := time.Now() err := sqlExecuter.SelectContext(ctx, &existingAddresses, query, pq.Array(stellarAddresses)) From 392b96e427509c7d4efa0f99b9486cf73d7c8798 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 15:22:30 -0700 Subject: [PATCH 21/27] Implement TransactionModel.BatchInsert --- internal/data/models.go | 18 ++-- internal/data/transactions.go | 116 ++++++++++++++++++++ internal/data/transactions_test.go | 168 +++++++++++++++++++++++++++++ 3 files changed, 294 insertions(+), 8 deletions(-) create mode 100644 internal/data/transactions.go create mode 100644 internal/data/transactions_test.go diff --git a/internal/data/models.go b/internal/data/models.go index 4f10f70c..0c071ef6 100644 --- a/internal/data/models.go +++ b/internal/data/models.go @@ -8,10 +8,11 @@ import ( ) type Models struct { - DB db.ConnectionPool - Account *AccountModel - IngestStore *IngestStoreModel - Payments *PaymentModel + DB db.ConnectionPool + Account *AccountModel + IngestStore *IngestStoreModel + Payments *PaymentModel + Transactions *TransactionModel } func NewModels(db db.ConnectionPool, metricsService metrics.MetricsService) (*Models, error) { @@ -20,9 +21,10 @@ func NewModels(db db.ConnectionPool, metricsService metrics.MetricsService) (*Mo } return &Models{ - DB: db, - Account: &AccountModel{DB: db, MetricsService: metricsService}, - IngestStore: &IngestStoreModel{DB: db, MetricsService: metricsService}, - Payments: &PaymentModel{DB: db, MetricsService: metricsService}, + DB: db, + Account: &AccountModel{DB: db, MetricsService: metricsService}, + IngestStore: &IngestStoreModel{DB: db, MetricsService: metricsService}, + Payments: &PaymentModel{DB: db, MetricsService: metricsService}, + Transactions: &TransactionModel{DB: db, MetricsService: metricsService}, }, nil } diff --git a/internal/data/transactions.go b/internal/data/transactions.go new file mode 100644 index 00000000..388b2cfb --- /dev/null +++ b/internal/data/transactions.go @@ -0,0 +1,116 @@ +package data + +import ( + "context" + "fmt" + "time" + + 
"github.com/lib/pq" + + "github.com/stellar/wallet-backend/internal/db" + "github.com/stellar/wallet-backend/internal/indexer/types" + "github.com/stellar/wallet-backend/internal/metrics" +) + +type TransactionModel struct { + DB db.ConnectionPool + MetricsService metrics.MetricsService +} + +func (m *TransactionModel) BatchInsert( + ctx context.Context, + dbTx db.SQLExecuter, + txs []types.Transaction, + stellarAddressesByTxHash map[string][]string, +) error { + now := time.Now() + var sqlExecuter db.SQLExecuter = dbTx + if sqlExecuter == nil { + sqlExecuter = m.DB + } + + // 1. Flatten the transactions into parallel slices + hashes := make([]string, len(txs)) + envelopeXDRs := make([]string, len(txs)) + resultXDRs := make([]string, len(txs)) + metaXDRs := make([]string, len(txs)) + ledgerNumbers := make([]int, len(txs)) + ledgerCreatedAts := make([]time.Time, len(txs)) + ingestedAts := make([]time.Time, len(txs)) + + for i, t := range txs { + hashes[i] = t.Hash + envelopeXDRs[i] = t.EnvelopeXDR + resultXDRs[i] = t.ResultXDR + metaXDRs[i] = t.MetaXDR + ledgerNumbers[i] = int(t.LedgerNumber) + ledgerCreatedAts[i] = t.LedgerCreatedAt + ingestedAts[i] = now + } + + // 2. Batch insert the transactions into the database + const insertTxsQuery = ` + INSERT INTO transactions + (hash, envelope_xdr, result_xdr, meta_xdr, ledger_number, ledger_created_at, ingested_at) + SELECT + UNNEST($1::text[]), + UNNEST($2::text[]), + UNNEST($3::text[]), + UNNEST($4::text[]), + UNNEST($5::bigint[]), + UNNEST($6::timestamptz[]), + UNNEST($7::timestamptz[]) + ON CONFLICT (hash) DO NOTHING; + ` + + start := time.Now() + _, err := sqlExecuter.ExecContext(ctx, insertTxsQuery, + pq.Array(hashes), + pq.Array(envelopeXDRs), + pq.Array(resultXDRs), + pq.Array(metaXDRs), + pq.Array(ledgerNumbers), + pq.Array(ledgerCreatedAts), + pq.Array(ingestedAts), + ) + duration := time.Since(start).Seconds() + m.MetricsService.ObserveDBQueryDuration("INSERT", "transactions", duration) + if err != nil { + return fmt.Errorf("batch inserting transactions: %w", err) + } + m.MetricsService.IncDBQuery("INSERT", "transactions") + + // 3. Flatten the stellarAddressesByTxHash into parallel slices + var ( + txHashes []string + accountIDs []string + ) + for txHash, addrs := range stellarAddressesByTxHash { + for _, acct := range addrs { + txHashes = append(txHashes, txHash) + accountIDs = append(accountIDs, acct) + } + } + + // 4. 
Batch insert the transactions_accounts into the database + const insertLinks = ` + INSERT INTO transactions_accounts (tx_hash, account_id) + SELECT + UNNEST($1::text[]), + UNNEST($2::text[]) + ON CONFLICT DO NOTHING; + ` + start = time.Now() + _, err = sqlExecuter.ExecContext(ctx, insertLinks, + pq.Array(txHashes), + pq.Array(accountIDs), + ) + duration = time.Since(start).Seconds() + m.MetricsService.ObserveDBQueryDuration("INSERT", "transactions_accounts", duration) + if err != nil { + return fmt.Errorf("batch insert transactions_accounts: %w", err) + } + m.MetricsService.IncDBQuery("INSERT", "transactions_accounts") + + return nil +} diff --git a/internal/data/transactions_test.go b/internal/data/transactions_test.go new file mode 100644 index 00000000..ccf78073 --- /dev/null +++ b/internal/data/transactions_test.go @@ -0,0 +1,168 @@ +package data + +import ( + "context" + "testing" + "time" + + "github.com/stellar/go/keypair" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/stellar/wallet-backend/internal/db" + "github.com/stellar/wallet-backend/internal/db/dbtest" + "github.com/stellar/wallet-backend/internal/indexer/types" + "github.com/stellar/wallet-backend/internal/metrics" +) + +func Test_TransactionModel_BatchInsert(t *testing.T) { + dbt := dbtest.Open(t) + defer dbt.Close() + dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) + require.NoError(t, err) + defer dbConnectionPool.Close() + + ctx := context.Background() + now := time.Now() + + // Create test data + kp1 := keypair.MustRandom() + kp2 := keypair.MustRandom() + const q = "INSERT INTO accounts (stellar_address) SELECT UNNEST(ARRAY[$1, $2])" + _, err = dbConnectionPool.ExecContext(ctx, q, kp1.Address(), kp2.Address()) + require.NoError(t, err) + + tx1 := types.Transaction{ + Hash: "tx1", + EnvelopeXDR: "envelope1", + ResultXDR: "result1", + MetaXDR: "meta1", + LedgerNumber: 1, + LedgerCreatedAt: now, + } + tx2 := types.Transaction{ + Hash: "tx2", + EnvelopeXDR: "envelope2", + ResultXDR: "result2", + MetaXDR: "meta2", + LedgerNumber: 2, + LedgerCreatedAt: now, + } + + testCases := []struct { + name string + useDbTx bool + txs []types.Transaction + stellarAddressesByHash map[string][]string + wantErrContains string + wantResults []string + }{ + { + name: "🟢successful_insert_without_transaction", + useDbTx: false, + txs: []types.Transaction{tx1, tx2}, + stellarAddressesByHash: map[string][]string{tx1.Hash: {kp1.Address()}, tx2.Hash: {kp2.Address()}}, + wantErrContains: "", + wantResults: []string{tx1.Hash, tx2.Hash}, + }, + { + name: "🟢successful_insert_with_transaction", + useDbTx: true, + txs: []types.Transaction{tx1}, + stellarAddressesByHash: map[string][]string{tx1.Hash: {kp1.Address()}}, + wantErrContains: "", + wantResults: []string{tx1.Hash}, + }, + { + name: "🟢empty_input", + useDbTx: false, + txs: []types.Transaction{}, + stellarAddressesByHash: map[string][]string{}, + wantErrContains: "", + wantResults: nil, + }, + { + name: "🟡duplicate_transaction", + useDbTx: false, + txs: []types.Transaction{tx1, tx1}, + stellarAddressesByHash: map[string][]string{tx1.Hash: {kp1.Address()}}, + wantErrContains: "", + wantResults: []string{tx1.Hash}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Clear the database before each test + _, err = dbConnectionPool.ExecContext(ctx, "TRUNCATE transactions, transactions_accounts CASCADE") + require.NoError(t, err) + + // Create fresh mock for each test case + 
mockMetricsService := metrics.NewMockMetricsService() + mockMetricsService. + On("ObserveDBQueryDuration", "INSERT", "transactions", mock.Anything).Return(). + On("IncDBQuery", "INSERT", "transactions").Return(). + On("ObserveDBQueryDuration", "INSERT", "transactions_accounts", mock.Anything).Return(). + On("IncDBQuery", "INSERT", "transactions_accounts").Return() + defer mockMetricsService.AssertExpectations(t) + + m := &TransactionModel{ + DB: dbConnectionPool, + MetricsService: mockMetricsService, + } + + var sqlExecuter db.SQLExecuter = dbConnectionPool + if tc.useDbTx { + tx, err := dbConnectionPool.BeginTxx(ctx, nil) + require.NoError(t, err) + defer tx.Rollback() + sqlExecuter = tx + } + + err := m.BatchInsert(ctx, sqlExecuter, tc.txs, tc.stellarAddressesByHash) + + if tc.wantErrContains != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.wantErrContains) + return + } + + // Verify the results + require.NoError(t, err) + var insertedHashes []string + err = sqlExecuter.SelectContext(ctx, &insertedHashes, "SELECT hash FROM transactions ORDER BY hash") + require.NoError(t, err) + assert.Equal(t, tc.wantResults, insertedHashes) + + // Verify the account links + if len(tc.stellarAddressesByHash) > 0 { + var accountLinks []struct { + TxHash string `db:"tx_hash"` + AccountID string `db:"account_id"` + } + err = sqlExecuter.SelectContext(ctx, &accountLinks, "SELECT tx_hash, account_id FROM transactions_accounts ORDER BY tx_hash, account_id") + require.NoError(t, err) + + // Create a map of tx_hash -> set of account_ids for O(1) lookups + accountLinksMap := make(map[string]map[string]struct{}) + for _, link := range accountLinks { + if _, exists := accountLinksMap[link.TxHash]; !exists { + accountLinksMap[link.TxHash] = make(map[string]struct{}) + } + accountLinksMap[link.TxHash][link.AccountID] = struct{}{} + } + + // Verify each transaction has its expected account links + for txHash, expectedAccounts := range tc.stellarAddressesByHash { + accounts, exists := accountLinksMap[txHash] + require.True(t, exists, "Transaction %s not found in account links", txHash) + for _, expectedAccount := range expectedAccounts { + _, found := accounts[expectedAccount] + assert.True(t, found, "Expected account link not found: tx=%s, account=%s", txHash, expectedAccount) + } + } + } + }) + } +} From 922020f8149a3d142a43ce09a8a3b425c1c05176 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 16:31:51 -0700 Subject: [PATCH 22/27] Remove the Participants type and use strings directly. Also, use a mux in the data bundle to allow concurrent access --- internal/indexer/data_bundle.go | 88 +++++++++++++++++++++++++------- internal/indexer/participants.go | 4 +- 2 files changed, 71 insertions(+), 21 deletions(-) diff --git a/internal/indexer/data_bundle.go b/internal/indexer/data_bundle.go index 1cd66565..8cc63d07 100644 --- a/internal/indexer/data_bundle.go +++ b/internal/indexer/data_bundle.go @@ -1,10 +1,12 @@ package indexer -import "github.com/stellar/wallet-backend/internal/indexer/types" +import ( + "sync" -type Participant string + "github.com/stellar/wallet-backend/internal/indexer/types" +) -// Set is a set of strings that enforces uniqueness +// Set is a set of strings that enforces uniqueness. type Set[T comparable] map[T]struct{} // Add adds a value to the set @@ -15,13 +17,20 @@ func (s Set[T]) Add(value T) { s[value] = struct{}{} } -// Contains checks if a value exists in the set +// Append adds all values from the input set to the current set. 
+func (s Set[T]) Append(inSet Set[T]) { + for k := range inSet { + s.Add(k) + } +} + +// Contains checks if a value exists in the set. func (s Set[T]) Contains(value T) bool { _, exists := s[value] return exists } -// ToSlice converts the set to a slice +// ToSlice converts the set to a slice. func (s Set[T]) ToSlice() []T { result := make([]T, 0, len(s)) for k := range s { @@ -31,43 +40,64 @@ func (s Set[T]) ToSlice() []T { } type DataBundle struct { + mu sync.RWMutex Network string - Participants Set[Participant] + Participants Set[string] OpByID map[string]types.Operation TxByHash map[string]types.Transaction - TxHashesByParticipant map[Participant]Set[string] - OpIDsByParticipant map[Participant]Set[string] + TxHashesByParticipant map[string]Set[string] + ParticipantsByTxHash map[string]Set[string] + OpIDsByParticipant map[string]Set[string] + ParticipantsByOpID map[string]Set[string] } func NewDataBundle(network string) DataBundle { return DataBundle{ Network: network, - Participants: Set[Participant]{}, + Participants: Set[string]{}, OpByID: map[string]types.Operation{}, TxByHash: map[string]types.Transaction{}, - TxHashesByParticipant: map[Participant]Set[string]{}, - OpIDsByParticipant: map[Participant]Set[string]{}, + TxHashesByParticipant: map[string]Set[string]{}, + OpIDsByParticipant: map[string]Set[string]{}, } } -func (b *DataBundle) PushTransactionWithParticipant(participant Participant, transaction types.Transaction) { +func (b *DataBundle) PushTransactionWithParticipant(participant string, transaction types.Transaction) { + b.mu.Lock() + defer b.mu.Unlock() + b.TxByHash[transaction.Hash] = transaction - b.TxHashesByParticipant[participant].Add(transaction.Hash) b.Participants.Add(participant) + + b.TxHashesByParticipant[participant].Add(transaction.Hash) + b.ParticipantsByTxHash[transaction.Hash].Add(participant) } -func (b *DataBundle) PushOperationWithParticipant(participant Participant, operation types.Operation) { +func (b *DataBundle) PushOperationWithParticipant(participant string, operation types.Operation) { + b.mu.Lock() + defer b.mu.Unlock() + b.OpByID[operation.ID] = operation + b.Participants.Add(participant) + b.OpIDsByParticipant[participant].Add(operation.ID) + b.ParticipantsByOpID[operation.ID].Add(participant) + b.TxHashesByParticipant[participant].Add(operation.TxHash) - b.Participants.Add(participant) + b.ParticipantsByTxHash[operation.TxHash].Add(participant) } -func (b *DataBundle) GetParticipantOperationIDs(participant Participant) Set[string] { +func (b *DataBundle) GetParticipantOperationIDs(participant string) Set[string] { + b.mu.RLock() + defer b.mu.RUnlock() + return b.OpIDsByParticipant[participant] } -func (b *DataBundle) GetParticipantOperations(participant Participant) []types.Operation { +func (b *DataBundle) GetParticipantOperations(participant string) []types.Operation { + b.mu.RLock() + defer b.mu.RUnlock() + ops := []types.Operation{} for _, opID := range b.OpIDsByParticipant[participant].ToSlice() { ops = append(ops, b.OpByID[opID]) @@ -75,14 +105,34 @@ func (b *DataBundle) GetParticipantOperations(participant Participant) []types.O return ops } -func (b *DataBundle) GetParticipantTransactionHashes(participant Participant) Set[string] { +func (b *DataBundle) GetOperationParticipants(operationID string) Set[string] { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.ParticipantsByOpID[operationID] +} + +func (b *DataBundle) GetParticipantTransactionHashes(participant string) Set[string] { + b.mu.RLock() + defer b.mu.RUnlock() + return 
b.TxHashesByParticipant[participant] } -func (b *DataBundle) GetParticipantTransactions(participant Participant) []types.Transaction { +func (b *DataBundle) GetParticipantTransactions(participant string) []types.Transaction { + b.mu.RLock() + defer b.mu.RUnlock() + txs := []types.Transaction{} for _, txHash := range b.TxHashesByParticipant[participant].ToSlice() { txs = append(txs, b.TxByHash[txHash]) } return txs } + +func (b *DataBundle) GetTransactionParticipants(transactionHash string) Set[string] { + b.mu.RLock() + defer b.mu.RUnlock() + + return b.ParticipantsByTxHash[transactionHash] +} diff --git a/internal/indexer/participants.go b/internal/indexer/participants.go index 6baf3b28..abbb09e1 100644 --- a/internal/indexer/participants.go +++ b/internal/indexer/participants.go @@ -137,7 +137,7 @@ func (p *ParticipantsProcessor) addTransactionParticipants(transaction ingest.Le // 3. Push transaction and participants to data bundle for _, xdrParticipant := range participants { - p.DataBundle.PushTransactionWithParticipant(Participant(xdrParticipant.Address()), dbTx) + p.DataBundle.PushTransactionWithParticipant(xdrParticipant.Address(), dbTx) } return nil @@ -185,7 +185,7 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans // 4. Push operation and participants to data bundle for _, xdrParticipant := range participants { - p.DataBundle.PushOperationWithParticipant(Participant(xdrParticipant.Address()), dbOp) + p.DataBundle.PushOperationWithParticipant(xdrParticipant.Address(), dbOp) } } From 673fa6e85a3310e7f9de9e8504fdfcec5c224b42 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 16:35:02 -0700 Subject: [PATCH 23/27] Start storing the transactions in the DB --- internal/services/ingest.go | 102 ++++++++++++++++++++++++++---------- 1 file changed, 75 insertions(+), 27 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 6fab2bdd..bad428aa 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -22,6 +22,7 @@ import ( "github.com/stellar/wallet-backend/internal/db" "github.com/stellar/wallet-backend/internal/entities" "github.com/stellar/wallet-backend/internal/indexer" + "github.com/stellar/wallet-backend/internal/indexer/types" "github.com/stellar/wallet-backend/internal/metrics" "github.com/stellar/wallet-backend/internal/signing/store" txutils "github.com/stellar/wallet-backend/internal/transactions/utils" @@ -164,12 +165,12 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u // fetch ledgers getLedgersResponse, err := m.fetchNextLedgersBatch(ctx, startLedger) if err != nil { + if errors.Is(err, ErrAlreadyInSync) { + log.Ctx(ctx).Info("Ingestion is already in sync, will retry in a few moments...") + continue + } return fmt.Errorf("fetching next ledgers batch: %w", err) } - if errors.Is(err, ErrAlreadyInSync) { - log.Ctx(ctx).Info("Ingestion is already in sync, will retry in a few moments...") - continue - } // process ledgers err = m.processLedgerResponse(ctx, getLedgersResponse) @@ -187,13 +188,16 @@ func (m *ingestService) Run(ctx context.Context, startLedger uint32, endLedger u m.metricsService.ObserveIngestionDuration(totalIngestionPrometheusLabel, time.Since(totalIngestionStart).Seconds()) ticker.Reset(tickerDuration) - manualTriggerChan <- nil + + if len(getLedgersResponse.Ledgers) > maxLedgerWindow { + manualTriggerChan <- nil + } } } -type result[T any] struct { +type jobResultData struct { ledger protocol.LedgerInfo - processedData T + 
processedData indexer.DataBundle err error } @@ -201,26 +205,19 @@ func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersRes log.Ctx(ctx).Infof("🚧 Will process & ingest %d ledgers", len(getLedgersResponse.Ledgers)) // Create a worker pool with - const poolSize = 32 + const poolSize = 200 pool := pond.New(poolSize, maxLedgerWindow, pond.Context(ctx)) - // Create a slice to store results - results := make([]result[any], len(getLedgersResponse.Ledgers)) + // Create a slice to store jobResults var errs []error + dataProcessor := indexer.NewParticipantsProcessor(m.networkPassphrase) // Submit tasks to the pool - for i, ledger := range getLedgersResponse.Ledgers { + for _, ledger := range getLedgersResponse.Ledgers { ledger := ledger // Create a new variable to avoid closure issues pool.Submit(func() { - processedData, err := m.processLedger(ctx, ledger) - if err != nil { - err = fmt.Errorf("processing ledger %d: %w", ledger.Sequence, err) - errs = append(errs, err) - } - results[i] = result[any]{ - ledger: ledger, - processedData: processedData, - err: err, + if err := m.processLedger(ctx, ledger, dataProcessor); err != nil { + errs = append(errs, fmt.Errorf("processing ledger %d: %w", ledger.Sequence, err)) } }) } @@ -232,31 +229,82 @@ func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersRes return fmt.Errorf("processing ledgers: %w", errors.Join(errs...)) } + err := m.ingestProcessedData(ctx, dataProcessor) + if err != nil { + return fmt.Errorf("ingesting processed data: %w", err) + } + log.Ctx(ctx).Infof("🚧 Done processing & ingesting %d ledgers", len(getLedgersResponse.Ledgers)) return nil } -func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.LedgerInfo) (indexer.DataBundle, error) { +func (m *ingestService) ingestProcessedData(ctx context.Context, dataProcessor *indexer.ParticipantsProcessor) error { + return db.RunInTransaction(ctx, m.models.DB, nil, func(dbTx db.Transaction) error { + dataBundle := &dataProcessor.DataBundle + // 1. Filter participants that are not in the watchlist. + existingAccounts, err := m.models.Account.GetExisting(ctx, dbTx, dataBundle.Participants.ToSlice()) + if err != nil { + return fmt.Errorf("getting existing accounts: %w", err) + } + + if len(existingAccounts) == 0 { + return nil + } + + // 2. Identify which data should be ingested. + txHashesToInsert := make(indexer.Set[string]) + participantsByTxHash := map[string][]string{} + for _, participant := range existingAccounts { + if !dataBundle.Participants.Contains(participant) { + continue + } + + // 2.1. Identify which transactions should be ingested. + participantTxHashes := dataBundle.GetParticipantTransactionHashes(participant) + txHashesToInsert.Append(participantTxHashes) + for txHash := range participantTxHashes { + participantsByTxHash[txHash] = append(participantsByTxHash[txHash], participant) + } + + // 2.2. TODO: Identify which operations should be ingested. + } + + // 3. Build the data to be ingested. + var transactionsToInsert = make([]types.Transaction, 0, len(txHashesToInsert)) + for txHash := range txHashesToInsert { + transactionsToInsert = append(transactionsToInsert, dataBundle.TxByHash[txHash]) + } + + // 4. Insert the transactions into the database. 
+ err = m.models.Transactions.BatchInsert(ctx, dbTx, transactionsToInsert, participantsByTxHash) + if err != nil { + return fmt.Errorf("inserting transactions: %w", err) + } + + return nil + }) +} + +func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.LedgerInfo, dataProcessor *indexer.ParticipantsProcessor) error { var xdrLedgerCloseMeta xdr.LedgerCloseMeta if err := xdr.SafeUnmarshalBase64(ledgerInfo.LedgerMetadata, &xdrLedgerCloseMeta); err != nil { - return indexer.DataBundle{}, fmt.Errorf("unmarshalling ledger close meta: %w", err) + return fmt.Errorf("unmarshalling ledger close meta: %w", err) } transactions, err := m.getLedgerTransactions(ctx, xdrLedgerCloseMeta) if err != nil { - return indexer.DataBundle{}, fmt.Errorf("getting ledger transactions: %w", err) + return fmt.Errorf("getting ledger transactions: %w", err) } - participantsProcessor := indexer.NewParticipantsProcessor(m.networkPassphrase) for _, tx := range transactions { - err := participantsProcessor.ProcessTransactionData(xdrLedgerCloseMeta, tx) + err := dataProcessor.ProcessTransactionData(xdrLedgerCloseMeta, tx) if err != nil { - return indexer.DataBundle{}, fmt.Errorf("processing transaction: %w", err) + return fmt.Errorf("processing transaction: %w", err) } } - return participantsProcessor.DataBundle, nil + return nil } func (m *ingestService) getLedgerTransactions(_ context.Context, xdrLedgerCloseMeta xdr.LedgerCloseMeta) ([]ingest.LedgerTransaction, error) { From 868b0a572991e12c3d69642d5807ba965b273265 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 17:08:33 -0700 Subject: [PATCH 24/27] Fix the data bundle. --- internal/indexer/data_bundle.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/internal/indexer/data_bundle.go b/internal/indexer/data_bundle.go index 8cc63d07..4deaa87f 100644 --- a/internal/indexer/data_bundle.go +++ b/internal/indexer/data_bundle.go @@ -58,7 +58,9 @@ func NewDataBundle(network string) DataBundle { OpByID: map[string]types.Operation{}, TxByHash: map[string]types.Transaction{}, TxHashesByParticipant: map[string]Set[string]{}, + ParticipantsByTxHash: map[string]Set[string]{}, OpIDsByParticipant: map[string]Set[string]{}, + ParticipantsByOpID: map[string]Set[string]{}, } } @@ -69,7 +71,14 @@ func (b *DataBundle) PushTransactionWithParticipant(participant string, transact b.TxByHash[transaction.Hash] = transaction b.Participants.Add(participant) + if b.TxHashesByParticipant[participant] == nil { + b.TxHashesByParticipant[participant] = Set[string]{} + } b.TxHashesByParticipant[participant].Add(transaction.Hash) + + if b.ParticipantsByTxHash[transaction.Hash] == nil { + b.ParticipantsByTxHash[transaction.Hash] = Set[string]{} + } b.ParticipantsByTxHash[transaction.Hash].Add(participant) } @@ -80,10 +89,24 @@ func (b *DataBundle) PushOperationWithParticipant(participant string, operation b.OpByID[operation.ID] = operation b.Participants.Add(participant) + if b.OpIDsByParticipant[participant] == nil { + b.OpIDsByParticipant[participant] = make(Set[string]) + } b.OpIDsByParticipant[participant].Add(operation.ID) + + if b.ParticipantsByOpID[operation.ID] == nil { + b.ParticipantsByOpID[operation.ID] = make(Set[string]) + } b.ParticipantsByOpID[operation.ID].Add(participant) + if b.TxHashesByParticipant[participant] == nil { + b.TxHashesByParticipant[participant] = make(Set[string]) + } b.TxHashesByParticipant[participant].Add(operation.TxHash) + + if b.ParticipantsByTxHash[operation.TxHash] == nil { + 
b.ParticipantsByTxHash[operation.TxHash] = make(Set[string]) + } b.ParticipantsByTxHash[operation.TxHash].Add(participant) } From 114f2da0cc43037697c42f44bbe811463a447fe6 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 17:09:09 -0700 Subject: [PATCH 25/27] Allow channel account unlock to receive a DB Tx as an input arg --- internal/services/ingest.go | 2 +- internal/services/ingest_test.go | 4 ++-- internal/services/rpc_service.go | 2 +- internal/signing/store/channel_accounts_model.go | 8 ++++++-- internal/signing/store/channel_accounts_model_test.go | 2 +- internal/signing/store/mocks.go | 4 ++-- internal/signing/store/types.go | 2 +- 7 files changed, 14 insertions(+), 10 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index bad428aa..51474c97 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -536,7 +536,7 @@ func (m *ingestService) unlockChannelAccounts(ctx context.Context, ledgerTransac } } - if affectedRows, err := m.chAccStore.UnassignTxAndUnlockChannelAccounts(ctx, innerTxHashes...); err != nil { + if affectedRows, err := m.chAccStore.UnassignTxAndUnlockChannelAccounts(ctx, nil, innerTxHashes...); err != nil { return fmt.Errorf("unlocking channel accounts with txHashes %v: %w", innerTxHashes, err) } else if affectedRows > 0 { log.Ctx(ctx).Infof("unlocked %d channel accounts", affectedRows) diff --git a/internal/services/ingest_test.go b/internal/services/ingest_test.go index b9fc5786..dfc73a51 100644 --- a/internal/services/ingest_test.go +++ b/internal/services/ingest_test.go @@ -484,7 +484,7 @@ func Test_Ingest_RunOld_LatestSyncedLedgerBehindRPC(t *testing.T) { require.NoError(t, err) txHash, err := transaction.HashHex(network.TestNetworkPassphrase) require.NoError(t, err) - mockChAccStore.On("UnassignTxAndUnlockChannelAccounts", mock.Anything, txHash).Return(int64(1), nil).Once() + mockChAccStore.On("UnassignTxAndUnlockChannelAccounts", mock.Anything, mock.Anything, txHash).Return(int64(1), nil).Once() txEnvXDR, err := transaction.Base64() require.NoError(t, err) @@ -553,7 +553,7 @@ func Test_Ingest_RunOld_LatestSyncedLedgerAheadOfRPC(t *testing.T) { On("TrackRPCServiceHealth", ctx, mock.Anything).Once(). 
On("NetworkPassphrase").Return(network.TestNetworkPassphrase) mockChAccStore := &store.ChannelAccountStoreMock{} - mockChAccStore.On("UnassignTxAndUnlockChannelAccounts", mock.Anything, testInnerTxHash).Return(int64(1), nil).Twice() + mockChAccStore.On("UnassignTxAndUnlockChannelAccounts", mock.Anything, mock.Anything, testInnerTxHash).Return(int64(1), nil).Twice() ingestService, err := NewIngestService(models, "ingestionLedger", &mockAppTracker, &mockRPCService, mockChAccStore, mockMetricsService) require.NoError(t, err) diff --git a/internal/services/rpc_service.go b/internal/services/rpc_service.go index 13bd7fd4..c4aa0059 100644 --- a/internal/services/rpc_service.go +++ b/internal/services/rpc_service.go @@ -340,7 +340,7 @@ func (r *rpcService) sendRPCRequest(method string, params entities.RPCParams) (j err = json.Unmarshal(body, &res) if err != nil { r.metricsService.IncRPCEndpointFailure(method) - return nil, fmt.Errorf("parsing RPC response JSON: %w", err) + return nil, fmt.Errorf("parsing RPC response JSON body %v: %w", string(body), err) } if res.Result == nil { diff --git a/internal/signing/store/channel_accounts_model.go b/internal/signing/store/channel_accounts_model.go index 968eda15..13696385 100644 --- a/internal/signing/store/channel_accounts_model.go +++ b/internal/signing/store/channel_accounts_model.go @@ -93,7 +93,11 @@ func (ca *ChannelAccountModel) AssignTxToChannelAccount(ctx context.Context, pub return nil } -func (ca *ChannelAccountModel) UnassignTxAndUnlockChannelAccounts(ctx context.Context, txHashes ...string) (int64, error) { +func (ca *ChannelAccountModel) UnassignTxAndUnlockChannelAccounts(ctx context.Context, sqlExec db.SQLExecuter, txHashes ...string) (int64, error) { + if sqlExec == nil { + sqlExec = ca.DB + } + if len(txHashes) == 0 { return 0, errors.New("txHashes cannot be empty") } @@ -107,7 +111,7 @@ func (ca *ChannelAccountModel) UnassignTxAndUnlockChannelAccounts(ctx context.Co WHERE locked_tx_hash = ANY($1) ` - res, err := ca.DB.ExecContext(ctx, query, pq.Array(txHashes)) + res, err := sqlExec.ExecContext(ctx, query, pq.Array(txHashes)) if err != nil { return 0, fmt.Errorf("unlocking channel accounts %v: %w", txHashes, err) } diff --git a/internal/signing/store/channel_accounts_model_test.go b/internal/signing/store/channel_accounts_model_test.go index 17d58a03..584373c0 100644 --- a/internal/signing/store/channel_accounts_model_test.go +++ b/internal/signing/store/channel_accounts_model_test.go @@ -255,7 +255,7 @@ func Test_ChannelAccountModel_UnassignTxAndUnlockChannelAccounts(t *testing.T) { require.True(t, chAccFromDB.LockedUntil.Valid) } - rowsAffected, err := m.UnassignTxAndUnlockChannelAccounts(ctx, tc.txHashes(fixtures)...) + rowsAffected, err := m.UnassignTxAndUnlockChannelAccounts(ctx, dbConnectionPool, tc.txHashes(fixtures)...) 
if tc.expectedErrContains != "" { require.ErrorContains(t, err, tc.expectedErrContains) } else { diff --git a/internal/signing/store/mocks.go b/internal/signing/store/mocks.go index 86d21dda..c300171f 100644 --- a/internal/signing/store/mocks.go +++ b/internal/signing/store/mocks.go @@ -44,8 +44,8 @@ func (s *ChannelAccountStoreMock) AssignTxToChannelAccount(ctx context.Context, return args.Error(0) } -func (s *ChannelAccountStoreMock) UnassignTxAndUnlockChannelAccounts(ctx context.Context, txHashes ...string) (int64, error) { - _ca := []any{ctx} +func (s *ChannelAccountStoreMock) UnassignTxAndUnlockChannelAccounts(ctx context.Context, sqlExec db.SQLExecuter, txHashes ...string) (int64, error) { + _ca := []any{ctx, sqlExec} for _, txHash := range txHashes { _ca = append(_ca, txHash) } diff --git a/internal/signing/store/types.go b/internal/signing/store/types.go index 4d63ad4e..d03a7dd3 100644 --- a/internal/signing/store/types.go +++ b/internal/signing/store/types.go @@ -23,7 +23,7 @@ type ChannelAccountStore interface { Get(ctx context.Context, sqlExec db.SQLExecuter, publicKey string) (*ChannelAccount, error) GetAllByPublicKey(ctx context.Context, sqlExec db.SQLExecuter, publicKeys ...string) ([]*ChannelAccount, error) AssignTxToChannelAccount(ctx context.Context, publicKey string, txHash string) error - UnassignTxAndUnlockChannelAccounts(ctx context.Context, txHashes ...string) (int64, error) + UnassignTxAndUnlockChannelAccounts(ctx context.Context, sqlExec db.SQLExecuter, txHashes ...string) (int64, error) BatchInsert(ctx context.Context, sqlExec db.SQLExecuter, channelAccounts []*ChannelAccount) error Count(ctx context.Context) (int64, error) } From 7edd410c8a11bf54ddc9268ca3260dd4260dd6fa Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Tue, 17 Jun 2025 17:09:36 -0700 Subject: [PATCH 26/27] Start unlocking channel accounts in the new ingestService.Run code --- internal/services/ingest.go | 42 ++++++++++++++++++++++++++++++++++--- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 51474c97..52c61445 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -205,7 +205,7 @@ func (m *ingestService) processLedgerResponse(ctx context.Context, getLedgersRes log.Ctx(ctx).Infof("🚧 Will process & ingest %d ledgers", len(getLedgersResponse.Ledgers)) // Create a worker pool with - const poolSize = 200 + const poolSize = 16 pool := pond.New(poolSize, maxLedgerWindow, pond.Context(ctx)) // Create a slice to store jobResults @@ -277,11 +277,22 @@ func (m *ingestService) ingestProcessedData(ctx context.Context, dataProcessor * } // 4. Insert the transactions into the database. + log.Ctx(ctx).Infof("inserting %d transactions with participants %#v", len(transactionsToInsert), participantsByTxHash) err = m.models.Transactions.BatchInsert(ctx, dbTx, transactionsToInsert, participantsByTxHash) if err != nil { return fmt.Errorf("inserting transactions: %w", err) } + // 5. Unlock channel accounts. 
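+	// Collect the envelope XDR of every transaction in the bundle and hand the list to
+	// unlockChannelAccounts, which derives each inner tx hash via extractInnerTxHash
+	// and unassigns/unlocks the matching channel accounts on the same dbTx, so the
+	// unlock commits or rolls back together with the ingested data.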
+ txEnvelopeXDRs := make([]string, 0, len(dataBundle.TxByHash)) + for _, tx := range dataBundle.TxByHash { + txEnvelopeXDRs = append(txEnvelopeXDRs, tx.EnvelopeXDR) + } + err = m.unlockChannelAccounts(ctx, dbTx, txEnvelopeXDRs) + if err != nil { + return fmt.Errorf("unlocking channel accounts: %w", err) + } + return nil }) } @@ -402,7 +413,7 @@ func (m *ingestService) RunOld(ctx context.Context, startLedger uint32, endLedge m.metricsService.ObserveIngestionDuration(paymentPrometheusLabel, time.Since(startTime).Seconds()) // eagerly unlock channel accounts from txs - err = m.unlockChannelAccounts(ctx, ledgerTransactions) + err = m.unlockChannelAccountsOld(ctx, ledgerTransactions) if err != nil { return fmt.Errorf("unlocking channel account from tx: %w", err) } @@ -521,7 +532,32 @@ func (m *ingestService) ingestPayments(ctx context.Context, ledgerTransactions [ } // unlockChannelAccounts unlocks the channel accounts associated with the given transaction XDRs. -func (m *ingestService) unlockChannelAccounts(ctx context.Context, ledgerTransactions []entities.Transaction) error { +func (m *ingestService) unlockChannelAccounts(ctx context.Context, dbTx db.Transaction, txEnvelopeXDRs []string) error { + if len(txEnvelopeXDRs) == 0 { + log.Ctx(ctx).Debug("no transactions to unlock channel accounts from") + return nil + } + + innerTxHashes := make([]string, 0, len(txEnvelopeXDRs)) + for _, txEnvelopeXDR := range txEnvelopeXDRs { + if innerTxHash, err := m.extractInnerTxHash(txEnvelopeXDR); err != nil { + return fmt.Errorf("extracting inner tx hash: %w", err) + } else { + innerTxHashes = append(innerTxHashes, innerTxHash) + } + } + + if affectedRows, err := m.chAccStore.UnassignTxAndUnlockChannelAccounts(ctx, dbTx, innerTxHashes...); err != nil { + return fmt.Errorf("unlocking channel accounts with txHashes %v: %w", innerTxHashes, err) + } else if affectedRows > 0 { + log.Ctx(ctx).Infof("unlocked %d channel accounts", affectedRows) + } + + return nil +} + +// unlockChannelAccountsOld unlocks the channel accounts associated with the given transaction XDRs. +func (m *ingestService) unlockChannelAccountsOld(ctx context.Context, ledgerTransactions []entities.Transaction) error { if len(ledgerTransactions) == 0 { log.Ctx(ctx).Debug("no transactions to unlock channel accounts from") return nil From 43899de58119f203f8f02e34da00256701703b35 Mon Sep 17 00:00:00 2001 From: Marcelo Salloum Date: Wed, 18 Jun 2025 11:08:28 -0700 Subject: [PATCH 27/27] Fix transaction ledger sequence and stop sending redundant `sequence` field --- internal/indexer/participants.go | 11 ++++++----- internal/services/ingest.go | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/indexer/participants.go b/internal/indexer/participants.go index abbb09e1..c2cfdebf 100644 --- a/internal/indexer/participants.go +++ b/internal/indexer/participants.go @@ -132,7 +132,7 @@ func (p *ParticipantsProcessor) addTransactionParticipants(transaction ingest.Le EnvelopeXDR: envelopeXDR, ResultXDR: resultXDR, MetaXDR: metaXDR, - LedgerNumber: transaction.LedgerVersion, + LedgerNumber: transaction.Ledger.LedgerSequence(), } // 3. 
Push transaction and participants to data bundle @@ -143,7 +143,7 @@ func (p *ParticipantsProcessor) addTransactionParticipants(transaction ingest.Le return nil } -func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, transaction ingest.LedgerTransaction) error { +func (p *ParticipantsProcessor) addOperationsParticipants(transaction ingest.LedgerTransaction) error { if !transaction.Successful() { return nil } @@ -151,6 +151,7 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans now := time.Now() ledgerCreatedAt := transaction.Ledger.ClosedAt() txHash := transaction.Hash.HexString() + ledgerSequence := transaction.Ledger.LedgerSequence() for opi, xdrOp := range transaction.Envelope.Operations() { // 1. Build op wrapper, so we can use its methods @@ -158,7 +159,7 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans Index: uint32(opi), Transaction: transaction, Operation: xdrOp, - LedgerSequence: sequence, + LedgerSequence: ledgerSequence, Network: p.network, } opID := fmt.Sprintf("%d", op.ID()) @@ -192,8 +193,8 @@ func (p *ParticipantsProcessor) addOperationsParticipants(sequence uint32, trans return nil } -func (p *ParticipantsProcessor) ProcessTransactionData(lcm xdr.LedgerCloseMeta, transaction ingest.LedgerTransaction) error { - if err := p.addOperationsParticipants(lcm.LedgerSequence(), transaction); err != nil { +func (p *ParticipantsProcessor) ProcessTransactionData(transaction ingest.LedgerTransaction) error { + if err := p.addOperationsParticipants(transaction); err != nil { return err } diff --git a/internal/services/ingest.go b/internal/services/ingest.go index 52c61445..c20e16db 100644 --- a/internal/services/ingest.go +++ b/internal/services/ingest.go @@ -309,7 +309,7 @@ func (m *ingestService) processLedger(ctx context.Context, ledgerInfo protocol.L } for _, tx := range transactions { - err := dataProcessor.ProcessTransactionData(xdrLedgerCloseMeta, tx) + err := dataProcessor.ProcessTransactionData(tx) if err != nil { return fmt.Errorf("processing transaction: %w", err) }
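
The lazy initialization added in PushTransactionWithParticipant and PushOperationWithParticipant guards against Go's nil-map write panic. A minimal, self-contained sketch of the failure and the fix follows; the Set[T] definition is an assumption (a map[T]struct{} with an Add method) mirroring how data_bundle.go uses it, not the actual type in internal/indexer:

package main

import "fmt"

// Assumed shape of the indexer's Set type: a map-backed set with an Add method.
type Set[T comparable] map[T]struct{}

func (s Set[T]) Add(v T) { s[v] = struct{}{} }

func main() {
	participantsByTxHash := map[string]Set[string]{}

	// Indexing a missing key yields the zero value for Set[string], which is a nil map.
	// Calling Add on it would panic: "assignment to entry in nil map".
	// participantsByTxHash["abc123"].Add("GAPARTICIPANT") // panics

	// The fix applied in data_bundle.go: lazily initialize the inner set first.
	if participantsByTxHash["abc123"] == nil {
		participantsByTxHash["abc123"] = make(Set[string])
	}
	participantsByTxHash["abc123"].Add("GAPARTICIPANT")

	fmt.Println(participantsByTxHash) // map[abc123:map[GAPARTICIPANT:{}]]
}

The patch repeats this check for TxHashesByParticipant, ParticipantsByTxHash, OpIDsByParticipant, and ParticipantsByOpID before each Add.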