From 6213b54bb32302169267fef08f49009e61434342 Mon Sep 17 00:00:00 2001 From: Yahya JIRARI Date: Thu, 17 Jul 2025 00:25:06 +0200 Subject: [PATCH 1/5] feat(getNextCrudTransactionBatch): Add batched transaction processing method --- CHANGELOG.md | 199 +++++++-------- .../kotlin/com/powersync/DatabaseTest.kt | 227 ++++++++++++++++++ .../kotlin/com/powersync/PowerSyncDatabase.kt | 19 ++ .../com/powersync/db/PowerSyncDatabaseImpl.kt | 56 +++++ 4 files changed, 404 insertions(+), 97 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61ab0a93..c473787a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,29 +1,34 @@ # Changelog +## 1.4.0 (unreleased) + +- Add `getNextCrudTransactionBatch` method to `PowerSyncDatabase` which allows for fetching a batch of CRUD operations to upload. + This is useful for uploading multiple transactions in a single batch. + ## 1.3.0 (unreleased) -* Support tables created outside of PowerSync with the `RawTable` API. +- Support tables created outside of PowerSync with the `RawTable` API. For more information, see [the documentation](https://docs.powersync.com/usage/use-case-examples/raw-tables). -* Fix `runWrapped` catching cancellation exceptions. -* Fix errors in `PowerSyncBackendConnector.fetchCredentials()` crashing Android apps. +- Fix `runWrapped` catching cancellation exceptions. +- Fix errors in `PowerSyncBackendConnector.fetchCredentials()` crashing Android apps. ## 1.2.2 -* Supabase: Avoid creating `Json` serializers multiple times. -* Fix local writes not being uploaded correctly when using WebSockets as a transport protocol. +- Supabase: Avoid creating `Json` serializers multiple times. +- Fix local writes not being uploaded correctly when using WebSockets as a transport protocol. ## 1.2.1 -* [Supabase Connector] Fixed issue where only `400` HTTP status code errors where reported as connection errors. The connector now reports errors for codes `>=400`. 
-* Update PowerSync core extension to `0.4.1`, fixing an issue with the new Rust client. -* Rust sync client: Fix writes made while offline not being uploaded reliably. -* Add watchOS support. +- [Supabase Connector] Fixed issue where only `400` HTTP status code errors where reported as connection errors. The connector now reports errors for codes `>=400`. +- Update PowerSync core extension to `0.4.1`, fixing an issue with the new Rust client. +- Rust sync client: Fix writes made while offline not being uploaded reliably. +- Add watchOS support. ## 1.2.0 -* Add a new sync client implementation written in Rust instead of Kotlin. While this client is still +- Add a new sync client implementation written in Rust instead of Kotlin. While this client is still experimental, we intend to make it the default in the future. The main benefit of this client is - faster sync performance, but upcoming features will also require this client. We encourage + faster sync performance, but upcoming features will also require this client. We encourage interested users to try it out by opting in to `ExperimentalPowerSyncAPI` and passing options when connecting: ```Kotlin @@ -34,7 +39,7 @@ ``` Switching between the clients can be done at any time without compatibility issues. If you run into issues with the new client, please reach out to us! -* In addition to HTTP streams, the Kotlin SDK also supports fetching sync instructions from the +- In addition to HTTP streams, the Kotlin SDK also supports fetching sync instructions from the PowerSync service in a binary format. This requires the new sync client, and can then be enabled on the sync options: ```Kotlin @@ -44,76 +49,76 @@ method = ConnectionMethod.WebSocket() )) ``` -* [Android, JVM] Use version `0.4.0` of `powersync-sqlite-core`. +- [Android, JVM] Use version `0.4.0` of `powersync-sqlite-core`. ## 1.1.1 -* Fix reported progress around compactions / defrags on the sync service. 
-* [Android] Set `temp_store_directory`, avoiding crashes for large materialized views. +- Fix reported progress around compactions / defrags on the sync service. +- [Android] Set `temp_store_directory`, avoiding crashes for large materialized views. ## 1.1.0 -* Add `trackPreviousValues` option on `Table` which sets `CrudEntry.previousValues` to previous values on updates. -* Add `trackMetadata` option on `Table` which adds a `_metadata` column that can be used for updates. +- Add `trackPreviousValues` option on `Table` which sets `CrudEntry.previousValues` to previous values on updates. +- Add `trackMetadata` option on `Table` which adds a `_metadata` column that can be used for updates. The configured metadata is available through `CrudEntry.metadata`. -* Add `ignoreEmptyUpdates` option which skips creating CRUD entries for updates that don't change any values. +- Add `ignoreEmptyUpdates` option which skips creating CRUD entries for updates that don't change any values. ## 1.0.1 -* [Internal] Version bump for broken Swift release pipeline +- [Internal] Version bump for broken Swift release pipeline ## 1.0.0 -* Bump SDK to V1/Stable feature status -* Fixed `CrudBatch` `hasMore` always returning false. -* Added `triggerImmediately` to `onChange` method. -* Report real-time progress information about downloads through `SyncStatus.downloadProgress`. -* Compose: Add `composeState()` extension method on `SyncStatus`. -* [Internal] Added helper method for Swift `PowerSyncException` throwing. +- Bump SDK to V1/Stable feature status +- Fixed `CrudBatch` `hasMore` always returning false. +- Added `triggerImmediately` to `onChange` method. +- Report real-time progress information about downloads through `SyncStatus.downloadProgress`. +- Compose: Add `composeState()` extension method on `SyncStatus`. +- [Internal] Added helper method for Swift `PowerSyncException` throwing. ## 1.0.0-BETA32 -* Added `onChange` method to the PowerSync client. 
This allows for observing table changes. -* Removed unnecessary `User-Id` header from internal PowerSync service requests. -* Fix loading native PowerSync extension for Java targets. +- Added `onChange` method to the PowerSync client. This allows for observing table changes. +- Removed unnecessary `User-Id` header from internal PowerSync service requests. +- Fix loading native PowerSync extension for Java targets. ## 1.0.0-BETA31 -* Added helpers for Attachment syncing. -* Fix `getNextCrudTransaction()` only returning a single item. +- Added helpers for Attachment syncing. +- Fix `getNextCrudTransaction()` only returning a single item. ## 1.0.0-BETA30 -* Fix a deadlock when calling `connect()` immediately after opening a database. +- Fix a deadlock when calling `connect()` immediately after opening a database. The issue has been introduced in version `1.0.0-BETA29`. ## 1.0.0-BETA29 -* Fix potential race condition between jobs in `connect()` and `disconnect()`. -* [JVM Windows] Fixed PowerSync Extension temporary file deletion error on process shutdown. -* [iOS] Fixed issue where automatic driver migrations would fail with the error: +- Fix potential race condition between jobs in `connect()` and `disconnect()`. +- [JVM Windows] Fixed PowerSync Extension temporary file deletion error on process shutdown. +- [iOS] Fixed issue where automatic driver migrations would fail with the error: ``` Sqlite operation failure database is locked attempted to run migration and failed. closing connection ``` -* Fix race condition causing data received during uploads not to be applied. +- Fix race condition causing data received during uploads not to be applied. ## 1.0.0-BETA28 -* Update PowerSync SQLite core extension to 0.3.12. -* Added queing protection and warnings when connecting multiple PowerSync clients to the same +- Update PowerSync SQLite core extension to 0.3.12. +- Added queing protection and warnings when connecting multiple PowerSync clients to the same database file. 
-* Improved concurrent SQLite connection support accross various platforms. All platforms now use a +- Improved concurrent SQLite connection support accross various platforms. All platforms now use a single write connection and multiple read connections for concurrent read queries. -* Added the ability to open a SQLite database given a custom `dbDirectory` path. This is currently +- Added the ability to open a SQLite database given a custom `dbDirectory` path. This is currently not supported on iOS due to internal driver restrictions. -* Internaly improved the linking of SQLite for iOS. -* Enabled Full Text Search on iOS platforms. -* Added the ability to update the schema for existing PowerSync clients. -* Fixed bug where local only, insert only and view name overrides were not applied for schema +- Internaly improved the linking of SQLite for iOS. +- Enabled Full Text Search on iOS platforms. +- Added the ability to update the schema for existing PowerSync clients. +- Fixed bug where local only, insert only and view name overrides were not applied for schema tables. -* The Android SQLite driver now uses +- The Android SQLite driver now uses the [Xerial JDBC library](https://github.com/xerial/sqlite-jdbc). This removes the requirement for users to add the jitpack Maven repository to their projects. @@ -130,52 +135,52 @@ Sqlite operation failure database is locked attempted to run migration and faile ## 1.0.0-BETA27 -* Improved watch query internals. Added the ability to throttle watched queries. -* Fixed `uploading` and `downloading` sync status indicators. +- Improved watch query internals. Added the ability to throttle watched queries. +- Fixed `uploading` and `downloading` sync status indicators. ## 1.0.0-BETA26 -* Support bucket priorities and partial syncs. -* Android: Add ProGuard rules to prevent methods called through JNI from being minified or removed. +- Support bucket priorities and partial syncs. 
+- Android: Add ProGuard rules to prevent methods called through JNI from being minified or removed. ## 1.0.0-BETA25 -* JVM: Lower minimum supported version from 17 to 8. +- JVM: Lower minimum supported version from 17 to 8. ## 1.0.0-BETA24 -* Improve internal handling of watch queries to avoid issues where updates are not being received +- Improve internal handling of watch queries to avoid issues where updates are not being received due to transaction commits occurring after the query is run. -* Fix issue in JVM build where `columnNames` was throwing an error due to the index of the JDBC +- Fix issue in JVM build where `columnNames` was throwing an error due to the index of the JDBC driver starting at 1 instead of 0 as in the other drivers/ -* Throw and not just catch `CancellationExceptions` in `runWrappedSuspending` +- Throw and not just catch `CancellationExceptions` in `runWrappedSuspending` ## 1.0.0-BETA23 -* Make `execute` and `PowerSyncTransaction` functions throwable for Swift +- Make `execute` and `PowerSyncTransaction` functions throwable for Swift ## 1.0.0-BETA22 -* Fix `updateHasSynced` internal null pointer exception +- Fix `updateHasSynced` internal null pointer exception ## 1.0.0-BETA21 -* Improve error handling for Swift by adding @Throws annotation so errors can be handled in Swift -* Throw PowerSync exceptions for all public facing methods +- Improve error handling for Swift by adding @Throws annotation so errors can be handled in Swift +- Throw PowerSync exceptions for all public facing methods ## 1.0.0-BETA20 -* Add cursor optional functions: `getStringOptional`, `getLongOptional`, `getDoubleOptional`, +- Add cursor optional functions: `getStringOptional`, `getLongOptional`, `getDoubleOptional`, `getBooleanOptional` and `getBytesOptional` when using the column name which allow for optional return types -* Throw errors for invalid column on all cursor functions -* `getString`, `getLong`, `getBytes`, `getDouble` and `getBoolean` used with the 
column name will +- Throw errors for invalid column on all cursor functions +- `getString`, `getLong`, `getBytes`, `getDouble` and `getBoolean` used with the column name will now throw an error for non-null values and expect a non optional return type ## 1.0.0-BETA19 -* Allow cursor to get values by column name e.g. `getStringOptional("id")` -* BREAKING CHANGE: If you were using `SqlCursor` from SqlDelight previously for your own custom +- Allow cursor to get values by column name e.g. `getStringOptional("id")` +- BREAKING CHANGE: If you were using `SqlCursor` from SqlDelight previously for your own custom mapper then you must now change to `SqlCursor` exported by the PowerSync module. Previously you would import it like this: @@ -192,80 +197,80 @@ Sqlite operation failure database is locked attempted to run migration and faile ## 1.0.0-BETA18 -* BREAKING CHANGE: Move from async sqldelight calls to synchronous calls. This will only affect +- BREAKING CHANGE: Move from async sqldelight calls to synchronous calls. This will only affect `readTransaction` and `writeTransaction`where the callback function is no longer asynchronous. ## 1.0.0-BETA17 -* Add fix for Windows using JVM build +- Add fix for Windows using JVM build ## 1.0.0-BETA16 -* Add `close` method to database methods -* Throw when error is a `CancellationError` and remove invalidation for all errors in +- Add `close` method to database methods +- Throw when error is a `CancellationError` and remove invalidation for all errors in `streamingSync` catch. ## 1.0.0-BETA15 -* Update powersync-sqlite-core to 0.3.8 -* Increase maximum amount of columns from 63 to 1999 +- Update powersync-sqlite-core to 0.3.8 +- Increase maximum amount of columns from 63 to 1999 ## 1.0.0-BETA14 -* Add JVM compatibility -* Revert previous iOS changes as they resulted in further issues. +- Add JVM compatibility +- Revert previous iOS changes as they resulted in further issues. 
## 1.0.0-BETA13 -* Move iOS database driver to use IO dispatcher which should avoid race conditions and improve +- Move iOS database driver to use IO dispatcher which should avoid race conditions and improve performance. ## 1.0.0-BETA12 -* Use transaction context in `writeTransaction` in `BucketStorageImpl`. +- Use transaction context in `writeTransaction` in `BucketStorageImpl`. ## 1.0.0-BETA11 -* Update version to fix deployment issue of previous release +- Update version to fix deployment issue of previous release ## 1.0.0-BETA10 -* Change Swift package name from `PowerSync` to `PowerSyncKotlin` +- Change Swift package name from `PowerSync` to `PowerSyncKotlin` ## 1.0.0-BETA9 -* Re-enable SKIE `SuspendInterop` -* Move transaction functions out of `PowerSyncTransactionFactory` to avoid threading issues in Swift +- Re-enable SKIE `SuspendInterop` +- Move transaction functions out of `PowerSyncTransactionFactory` to avoid threading issues in Swift SDK ## 1.0.0-BETA8 -* Disable SKIE `SuspendInterop` plugin to fix overriding `suspend` functions in Swift +- Disable SKIE `SuspendInterop` plugin to fix overriding `suspend` functions in Swift ## 1.0.0-BETA7 -* Update supabase connector to use supabase-kt version 3 -* Handle Postgres error codes in supabase connector +- Update supabase connector to use supabase-kt version 3 +- Handle Postgres error codes in supabase connector ## 1.0.0-BETA6 -* Fix Custom Write Checkpoint application logic +- Fix Custom Write Checkpoint application logic ## 1.0.0-BETA5 -* Fix `hasSynced` not updating after `disconnectAndClear` -* Fix error being thrown in iOS app launch +- Fix `hasSynced` not updating after `disconnectAndClear` +- Fix error being thrown in iOS app launch ## 1.0.0-BETA4 -* Fix sync status being reset when `update` function is run +- Fix sync status being reset when `update` function is run ## 1.0.0-BETA3 -* Add `waitForFirstSync` function - which resolves after the initial sync is completed -* Upgrade to Kotlin 2.0.20 - 
should not cause any issues with users who are still on Kotlin 1.9 -* Upgrade `powersync-sqlite-core` to 0.3.0 - improves incremental sync performance -* Add client sync parameters - which allows you specify sync parameters from the +- Add `waitForFirstSync` function - which resolves after the initial sync is completed +- Upgrade to Kotlin 2.0.20 - should not cause any issues with users who are still on Kotlin 1.9 +- Upgrade `powersync-sqlite-core` to 0.3.0 - improves incremental sync performance +- Add client sync parameters - which allows you specify sync parameters from the client https://docs.powersync.com/usage/sync-rules/advanced-topics/client-parameters-beta ```kotlin @@ -283,25 +288,25 @@ params = params ) ``` -* Add schema validation when schema is generated -* Add warning message if there is a crudItem in the queue that has not yet been synced and after a +- Add schema validation when schema is generated +- Add warning message if there is a crudItem in the queue that has not yet been synced and after a delay rerun the upload ## 1.0.0-BETA2 -* Publish persistence package +- Publish persistence package ## 1.0.0-BETA1 -* Improve API by changing from Builder pattern to simply instantiating the database +- Improve API by changing from Builder pattern to simply instantiating the database `PowerSyncDatabase` E.g. `val db = PowerSyncDatabase(factory, schema)` -* Use callback context in transactions +- Use callback context in transactions E.g. `db.writeTransaction{ ctx -> ctx.execute(...) 
}` -* Removed unnecessary expiredAt field -* Added table max column validation as there is a hard limit of 63 columns -* Moved SQLDelight models to a separate module to reduce export size -* Replaced default Logger with [Kermit Logger](https://kermit.touchlab.co/) which allows users to +- Removed unnecessary expiredAt field +- Added table max column validation as there is a hard limit of 63 columns +- Moved SQLDelight models to a separate module to reduce export size +- Replaced default Logger with [Kermit Logger](https://kermit.touchlab.co/) which allows users to more easily use and/or change Logger settings -* Add `retryDelay` and `crudThrottle` options when setting up database connection -* Changed `_viewNameOverride` to `viewNameOverride` +- Add `retryDelay` and `crudThrottle` options when setting up database connection +- Changed `_viewNameOverride` to `viewNameOverride` diff --git a/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt b/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt index 3e5f5f19..4777fe8c 100644 --- a/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt +++ b/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt @@ -459,4 +459,231 @@ class DatabaseTest { database.getCrudBatch() shouldBe null } + + @Test + fun testCrudTransactionBatch() = + databaseTest { + // Create a single insert (transaction 1) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("a", "a@example.org"), + ) + + // Create a transaction with 2 inserts (transaction 2) + database.writeTransaction { + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("b", "b@example.org"), + ) + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("c", "c@example.org"), + ) + } + + // Create another single insert (transaction 3) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("d", 
"d@example.org"), + ) + + // Create another transaction with 3 inserts (transaction 4) + database.writeTransaction { + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("e", "e@example.org"), + ) + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("f", "f@example.org"), + ) + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("g", "g@example.org"), + ) + } + + // Test with limit of 2 transactions + var batch = database.getNextCrudTransactionBatch(2) ?: error("Batch should not be null") + batch.hasMore shouldBe true + batch.crud shouldHaveSize 3 // 1 entry from transaction 1 + 2 entries from transaction 2 + batch.complete(null) + + // Test with limit of 1 transaction + batch = database.getNextCrudTransactionBatch(1) ?: error("Batch should not be null") + batch.hasMore shouldBe true + batch.crud shouldHaveSize 1 // 1 entry from transaction 3 + batch.complete(null) + + // Test with large limit that covers remaining transactions + batch = database.getNextCrudTransactionBatch(10) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 3 // 3 entries from transaction 4 + batch.complete(null) + + // Should be no more transactions + database.getNextCrudTransactionBatch() shouldBe null + } + + @Test + fun testCrudTransactionBatchWithNullTxId() = + databaseTest { + // Create operations without transactions (NULL tx_id) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("a", "a@example.org"), + ) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("b", "b@example.org"), + ) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("c", "c@example.org"), + ) + + // Each NULL tx_id operation should be treated as its own transaction + var batch = database.getNextCrudTransactionBatch(2) ?: error("Batch should not be null") + 
batch.hasMore shouldBe true + batch.crud shouldHaveSize 2 // 2 individual transactions + batch.complete(null) + + // Get the remaining transaction + batch = database.getNextCrudTransactionBatch(10) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 1 // 1 remaining transaction + batch.complete(null) + + database.getNextCrudTransactionBatch() shouldBe null + } + + @Test + fun testCrudTransactionBatchLargeTransaction() = + databaseTest { + // Create a large transaction with many operations + database.writeTransaction { + repeat(10) { i -> + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("user$i", "user$i@example.org"), + ) + } + } + + // Add a single operation + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("single", "single@example.org"), + ) + + // Should get the entire large transaction (10 operations) in one batch + var batch = database.getNextCrudTransactionBatch(1) ?: error("Batch should not be null") + batch.hasMore shouldBe true + batch.crud shouldHaveSize 10 + batch.complete(null) + + // Should get the single operation + batch = database.getNextCrudTransactionBatch(1) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 1 + batch.complete(null) + + database.getNextCrudTransactionBatch() shouldBe null + } + + @Test + fun testCrudTransactionBatchOrdering() = + databaseTest { + // Create operations in a specific order to test ordering + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("first", "first@example.org"), + ) + + database.writeTransaction { + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("second_a", "second_a@example.org"), + ) + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("second_b", "second_b@example.org"), + ) + } + + database.execute( + "INSERT INTO users (id, 
name, email) VALUES (uuid(), ?, ?)", + listOf("third", "third@example.org"), + ) + + // Operations should be processed in order + val batch = database.getNextCrudTransactionBatch(10) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 4 + + // Verify order by checking operation data + val operations = batch.crud + operations[0].opData!!["name"] shouldBe "first" + operations[1].opData!!["name"] shouldBe "second_a" + operations[2].opData!!["name"] shouldBe "second_b" + operations[3].opData!!["name"] shouldBe "third" + + batch.complete(null) + database.getNextCrudTransactionBatch() shouldBe null + } + + @Test + fun testCrudTransactionBatchEmptyDatabase() = + databaseTest { + val batch = database.getNextCrudTransactionBatch() + batch shouldBe null + } + + @Test + fun testCrudTransactionBatchZeroLimit() = + databaseTest { + // Create some operations + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("a", "a@example.org"), + ) + + // Zero limit should return null even if operations exist + val batch = database.getNextCrudTransactionBatch(0) + batch shouldBe null + } + + @Test + fun testCrudTransactionBatchShouldCountTransactionsNotOperations() = + databaseTest { + // Create a transaction with 3 operations + database.writeTransaction { + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("tx1_op1", "tx1_op1@example.org"), + ) + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("tx1_op2", "tx1_op2@example.org"), + ) + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("tx1_op3", "tx1_op3@example.org"), + ) + } + + // Create a single operation (NULL tx_id) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("single", "single@example.org"), + ) + + // Request limit of 2 transactions - should get all 4 operations (3 from tx + 1 single) + val batch = 
database.getNextCrudTransactionBatch(2) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 4 + batch.complete(null) + + database.getNextCrudTransactionBatch() shouldBe null + } } diff --git a/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt b/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt index d587932d..3de07a8e 100644 --- a/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt +++ b/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt @@ -134,6 +134,25 @@ public interface PowerSyncDatabase : Queries { @Throws(PowerSyncException::class, CancellationException::class) public suspend fun getNextCrudTransaction(): CrudTransaction? + /** + * Get a batch of crud data from multiple transactions to upload. + * + * Returns null if there is no data to upload. + * + * Use this from the [PowerSyncBackendConnector.uploadData] callback. + * + * Once the data have been successfully uploaded, call [CrudBatch.complete] before + * requesting the next batch. + * + * Unlike [getCrudBatch], this groups data by transaction, allowing developers to + * upload multiple complete transactions in a single batch operation. + * + * @param transactionLimit The maximum number of transactions to include in the batch. + * Default is 10. + */ + @Throws(PowerSyncException::class, CancellationException::class) + public suspend fun getNextCrudTransactionBatch(transactionLimit: Int = 10): CrudBatch? + /** + * Convenience method to get the current version of PowerSync. 
*/ diff --git a/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt b/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt index 51880ee1..d30d3547 100644 --- a/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt +++ b/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt @@ -309,6 +309,62 @@ internal class PowerSyncDatabaseImpl( } } + override suspend fun getNextCrudTransactionBatch(transactionLimit: Int): CrudBatch? { + waitReady() + return internalDb.readTransaction { transaction -> + // Since tx_id can be null, we can't use a WHERE tx_id < ? with transactionLimit + first crud entry tx_id + // So we get all operations and group them by transaction or fall back to an individual transaction if tx_id is null + val allOperations = + transaction.getAll( + "SELECT id, tx_id, data FROM ps_crud ORDER BY id ASC", + ) { cursor -> + CrudEntry.fromRow( + CrudRow( + id = cursor.getString("id"), + data = cursor.getString("data"), + txId = cursor.getLongOptional("tx_id")?.toInt(), + ), + ) + } + + val result = mutableListOf<CrudEntry>() + val processedTransactions = mutableSetOf<Int>() + var transactionCount = 0 + + for (operation in allOperations) { + if (transactionCount >= transactionLimit) break + + val txId = operation.transactionId + if (txId == null) { + // NULL tx_id operations are individual transactions + result.add(operation) + transactionCount++ + } else if (txId !in processedTransactions) { + val transactionOperations = bucketStorage.getCrudItemsByTransactionId(txId, transaction) + result.addAll(transactionOperations) + processedTransactions.add(txId) + transactionCount++ + } + } + + if (result.isEmpty()) { + return@readTransaction null + } + + val hasMore = result.size < allOperations.size + val last = result.last() + + return@readTransaction CrudBatch( + crud = result, + hasMore = hasMore, + complete = { writeCheckpoint -> + logger.i { "[CrudTransactionBatch::complete] Completing batch with checkpoint 
$writeCheckpoint" } + handleWriteCheckpoint(last.clientId, writeCheckpoint) + }, + ) + } + } + override suspend fun getPowerSyncVersion(): String { // The initialization sets powerSyncVersion. waitReady() From 63db576891e8216529f55d8ef3fb78ca20001e51 Mon Sep 17 00:00:00 2001 From: Yahya JIRARI Date: Thu, 17 Jul 2025 00:27:30 +0200 Subject: [PATCH 2/5] feat(getNextCrudTransactionBatch): revert unwanted formatting changes to CHANGELOG.md --- CHANGELOG.md | 195 ++++++++++++++++++++++++++------------------------- 1 file changed, 98 insertions(+), 97 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c473787a..fe41958a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,30 +5,31 @@ - Add `getNextCrudTransactionBatch` method to `PowerSyncDatabase` which allows for fetching a batch of CRUD operations to upload. This is useful for uploading multiple transactions in a single batch. + ## 1.3.0 (unreleased) -- Support tables created outside of PowerSync with the `RawTable` API. +* Support tables created outside of PowerSync with the `RawTable` API. For more information, see [the documentation](https://docs.powersync.com/usage/use-case-examples/raw-tables). -- Fix `runWrapped` catching cancellation exceptions. -- Fix errors in `PowerSyncBackendConnector.fetchCredentials()` crashing Android apps. +* Fix `runWrapped` catching cancellation exceptions. +* Fix errors in `PowerSyncBackendConnector.fetchCredentials()` crashing Android apps. ## 1.2.2 -- Supabase: Avoid creating `Json` serializers multiple times. -- Fix local writes not being uploaded correctly when using WebSockets as a transport protocol. +* Supabase: Avoid creating `Json` serializers multiple times. +* Fix local writes not being uploaded correctly when using WebSockets as a transport protocol. ## 1.2.1 -- [Supabase Connector] Fixed issue where only `400` HTTP status code errors where reported as connection errors. The connector now reports errors for codes `>=400`. 
-- Update PowerSync core extension to `0.4.1`, fixing an issue with the new Rust client. -- Rust sync client: Fix writes made while offline not being uploaded reliably. -- Add watchOS support. +* [Supabase Connector] Fixed issue where only `400` HTTP status code errors where reported as connection errors. The connector now reports errors for codes `>=400`. +* Update PowerSync core extension to `0.4.1`, fixing an issue with the new Rust client. +* Rust sync client: Fix writes made while offline not being uploaded reliably. +* Add watchOS support. ## 1.2.0 -- Add a new sync client implementation written in Rust instead of Kotlin. While this client is still +* Add a new sync client implementation written in Rust instead of Kotlin. While this client is still experimental, we intend to make it the default in the future. The main benefit of this client is - faster sync performance, but upcoming features will also require this client. We encourage + faster sync performance, but upcoming features will also require this client. We encourage interested users to try it out by opting in to `ExperimentalPowerSyncAPI` and passing options when connecting: ```Kotlin @@ -39,7 +40,7 @@ ``` Switching between the clients can be done at any time without compatibility issues. If you run into issues with the new client, please reach out to us! -- In addition to HTTP streams, the Kotlin SDK also supports fetching sync instructions from the +* In addition to HTTP streams, the Kotlin SDK also supports fetching sync instructions from the PowerSync service in a binary format. This requires the new sync client, and can then be enabled on the sync options: ```Kotlin @@ -49,76 +50,76 @@ method = ConnectionMethod.WebSocket() )) ``` -- [Android, JVM] Use version `0.4.0` of `powersync-sqlite-core`. +* [Android, JVM] Use version `0.4.0` of `powersync-sqlite-core`. ## 1.1.1 -- Fix reported progress around compactions / defrags on the sync service. 
-- [Android] Set `temp_store_directory`, avoiding crashes for large materialized views. +* Fix reported progress around compactions / defrags on the sync service. +* [Android] Set `temp_store_directory`, avoiding crashes for large materialized views. ## 1.1.0 -- Add `trackPreviousValues` option on `Table` which sets `CrudEntry.previousValues` to previous values on updates. -- Add `trackMetadata` option on `Table` which adds a `_metadata` column that can be used for updates. +* Add `trackPreviousValues` option on `Table` which sets `CrudEntry.previousValues` to previous values on updates. +* Add `trackMetadata` option on `Table` which adds a `_metadata` column that can be used for updates. The configured metadata is available through `CrudEntry.metadata`. -- Add `ignoreEmptyUpdates` option which skips creating CRUD entries for updates that don't change any values. +* Add `ignoreEmptyUpdates` option which skips creating CRUD entries for updates that don't change any values. ## 1.0.1 -- [Internal] Version bump for broken Swift release pipeline +* [Internal] Version bump for broken Swift release pipeline ## 1.0.0 -- Bump SDK to V1/Stable feature status -- Fixed `CrudBatch` `hasMore` always returning false. -- Added `triggerImmediately` to `onChange` method. -- Report real-time progress information about downloads through `SyncStatus.downloadProgress`. -- Compose: Add `composeState()` extension method on `SyncStatus`. -- [Internal] Added helper method for Swift `PowerSyncException` throwing. +* Bump SDK to V1/Stable feature status +* Fixed `CrudBatch` `hasMore` always returning false. +* Added `triggerImmediately` to `onChange` method. +* Report real-time progress information about downloads through `SyncStatus.downloadProgress`. +* Compose: Add `composeState()` extension method on `SyncStatus`. +* [Internal] Added helper method for Swift `PowerSyncException` throwing. ## 1.0.0-BETA32 -- Added `onChange` method to the PowerSync client. 
This allows for observing table changes. -- Removed unnecessary `User-Id` header from internal PowerSync service requests. -- Fix loading native PowerSync extension for Java targets. +* Added `onChange` method to the PowerSync client. This allows for observing table changes. +* Removed unnecessary `User-Id` header from internal PowerSync service requests. +* Fix loading native PowerSync extension for Java targets. ## 1.0.0-BETA31 -- Added helpers for Attachment syncing. -- Fix `getNextCrudTransaction()` only returning a single item. +* Added helpers for Attachment syncing. +* Fix `getNextCrudTransaction()` only returning a single item. ## 1.0.0-BETA30 -- Fix a deadlock when calling `connect()` immediately after opening a database. +* Fix a deadlock when calling `connect()` immediately after opening a database. The issue has been introduced in version `1.0.0-BETA29`. ## 1.0.0-BETA29 -- Fix potential race condition between jobs in `connect()` and `disconnect()`. -- [JVM Windows] Fixed PowerSync Extension temporary file deletion error on process shutdown. -- [iOS] Fixed issue where automatic driver migrations would fail with the error: +* Fix potential race condition between jobs in `connect()` and `disconnect()`. +* [JVM Windows] Fixed PowerSync Extension temporary file deletion error on process shutdown. +* [iOS] Fixed issue where automatic driver migrations would fail with the error: ``` Sqlite operation failure database is locked attempted to run migration and failed. closing connection ``` -- Fix race condition causing data received during uploads not to be applied. +* Fix race condition causing data received during uploads not to be applied. ## 1.0.0-BETA28 -- Update PowerSync SQLite core extension to 0.3.12. -- Added queing protection and warnings when connecting multiple PowerSync clients to the same +* Update PowerSync SQLite core extension to 0.3.12. +* Added queing protection and warnings when connecting multiple PowerSync clients to the same database file. 
-- Improved concurrent SQLite connection support accross various platforms. All platforms now use a +* Improved concurrent SQLite connection support accross various platforms. All platforms now use a single write connection and multiple read connections for concurrent read queries. -- Added the ability to open a SQLite database given a custom `dbDirectory` path. This is currently +* Added the ability to open a SQLite database given a custom `dbDirectory` path. This is currently not supported on iOS due to internal driver restrictions. -- Internaly improved the linking of SQLite for iOS. -- Enabled Full Text Search on iOS platforms. -- Added the ability to update the schema for existing PowerSync clients. -- Fixed bug where local only, insert only and view name overrides were not applied for schema +* Internaly improved the linking of SQLite for iOS. +* Enabled Full Text Search on iOS platforms. +* Added the ability to update the schema for existing PowerSync clients. +* Fixed bug where local only, insert only and view name overrides were not applied for schema tables. -- The Android SQLite driver now uses +* The Android SQLite driver now uses the [Xerial JDBC library](https://github.com/xerial/sqlite-jdbc). This removes the requirement for users to add the jitpack Maven repository to their projects. @@ -135,52 +136,52 @@ Sqlite operation failure database is locked attempted to run migration and faile ## 1.0.0-BETA27 -- Improved watch query internals. Added the ability to throttle watched queries. -- Fixed `uploading` and `downloading` sync status indicators. +* Improved watch query internals. Added the ability to throttle watched queries. +* Fixed `uploading` and `downloading` sync status indicators. ## 1.0.0-BETA26 -- Support bucket priorities and partial syncs. -- Android: Add ProGuard rules to prevent methods called through JNI from being minified or removed. +* Support bucket priorities and partial syncs. 
+* Android: Add ProGuard rules to prevent methods called through JNI from being minified or removed. ## 1.0.0-BETA25 -- JVM: Lower minimum supported version from 17 to 8. +* JVM: Lower minimum supported version from 17 to 8. ## 1.0.0-BETA24 -- Improve internal handling of watch queries to avoid issues where updates are not being received +* Improve internal handling of watch queries to avoid issues where updates are not being received due to transaction commits occurring after the query is run. -- Fix issue in JVM build where `columnNames` was throwing an error due to the index of the JDBC +* Fix issue in JVM build where `columnNames` was throwing an error due to the index of the JDBC driver starting at 1 instead of 0 as in the other drivers/ -- Throw and not just catch `CancellationExceptions` in `runWrappedSuspending` +* Throw and not just catch `CancellationExceptions` in `runWrappedSuspending` ## 1.0.0-BETA23 -- Make `execute` and `PowerSyncTransaction` functions throwable for Swift +* Make `execute` and `PowerSyncTransaction` functions throwable for Swift ## 1.0.0-BETA22 -- Fix `updateHasSynced` internal null pointer exception +* Fix `updateHasSynced` internal null pointer exception ## 1.0.0-BETA21 -- Improve error handling for Swift by adding @Throws annotation so errors can be handled in Swift -- Throw PowerSync exceptions for all public facing methods +* Improve error handling for Swift by adding @Throws annotation so errors can be handled in Swift +* Throw PowerSync exceptions for all public facing methods ## 1.0.0-BETA20 -- Add cursor optional functions: `getStringOptional`, `getLongOptional`, `getDoubleOptional`, +* Add cursor optional functions: `getStringOptional`, `getLongOptional`, `getDoubleOptional`, `getBooleanOptional` and `getBytesOptional` when using the column name which allow for optional return types -- Throw errors for invalid column on all cursor functions -- `getString`, `getLong`, `getBytes`, `getDouble` and `getBoolean` used with the 
column name will +* Throw errors for invalid column on all cursor functions +* `getString`, `getLong`, `getBytes`, `getDouble` and `getBoolean` used with the column name will now throw an error for non-null values and expect a non optional return type ## 1.0.0-BETA19 -- Allow cursor to get values by column name e.g. `getStringOptional("id")` -- BREAKING CHANGE: If you were using `SqlCursor` from SqlDelight previously for your own custom +* Allow cursor to get values by column name e.g. `getStringOptional("id")` +* BREAKING CHANGE: If you were using `SqlCursor` from SqlDelight previously for your own custom mapper then you must now change to `SqlCursor` exported by the PowerSync module. Previously you would import it like this: @@ -197,80 +198,80 @@ Sqlite operation failure database is locked attempted to run migration and faile ## 1.0.0-BETA18 -- BREAKING CHANGE: Move from async sqldelight calls to synchronous calls. This will only affect +* BREAKING CHANGE: Move from async sqldelight calls to synchronous calls. This will only affect `readTransaction` and `writeTransaction`where the callback function is no longer asynchronous. ## 1.0.0-BETA17 -- Add fix for Windows using JVM build +* Add fix for Windows using JVM build ## 1.0.0-BETA16 -- Add `close` method to database methods -- Throw when error is a `CancellationError` and remove invalidation for all errors in +* Add `close` method to database methods +* Throw when error is a `CancellationError` and remove invalidation for all errors in `streamingSync` catch. ## 1.0.0-BETA15 -- Update powersync-sqlite-core to 0.3.8 -- Increase maximum amount of columns from 63 to 1999 +* Update powersync-sqlite-core to 0.3.8 +* Increase maximum amount of columns from 63 to 1999 ## 1.0.0-BETA14 -- Add JVM compatibility -- Revert previous iOS changes as they resulted in further issues. +* Add JVM compatibility +* Revert previous iOS changes as they resulted in further issues. 
## 1.0.0-BETA13 -- Move iOS database driver to use IO dispatcher which should avoid race conditions and improve +* Move iOS database driver to use IO dispatcher which should avoid race conditions and improve performance. ## 1.0.0-BETA12 -- Use transaction context in `writeTransaction` in `BucketStorageImpl`. +* Use transaction context in `writeTransaction` in `BucketStorageImpl`. ## 1.0.0-BETA11 -- Update version to fix deployment issue of previous release +* Update version to fix deployment issue of previous release ## 1.0.0-BETA10 -- Change Swift package name from `PowerSync` to `PowerSyncKotlin` +* Change Swift package name from `PowerSync` to `PowerSyncKotlin` ## 1.0.0-BETA9 -- Re-enable SKIE `SuspendInterop` -- Move transaction functions out of `PowerSyncTransactionFactory` to avoid threading issues in Swift +* Re-enable SKIE `SuspendInterop` +* Move transaction functions out of `PowerSyncTransactionFactory` to avoid threading issues in Swift SDK ## 1.0.0-BETA8 -- Disable SKIE `SuspendInterop` plugin to fix overriding `suspend` functions in Swift +* Disable SKIE `SuspendInterop` plugin to fix overriding `suspend` functions in Swift ## 1.0.0-BETA7 -- Update supabase connector to use supabase-kt version 3 -- Handle Postgres error codes in supabase connector +* Update supabase connector to use supabase-kt version 3 +* Handle Postgres error codes in supabase connector ## 1.0.0-BETA6 -- Fix Custom Write Checkpoint application logic +* Fix Custom Write Checkpoint application logic ## 1.0.0-BETA5 -- Fix `hasSynced` not updating after `disconnectAndClear` -- Fix error being thrown in iOS app launch +* Fix `hasSynced` not updating after `disconnectAndClear` +* Fix error being thrown in iOS app launch ## 1.0.0-BETA4 -- Fix sync status being reset when `update` function is run +* Fix sync status being reset when `update` function is run ## 1.0.0-BETA3 -- Add `waitForFirstSync` function - which resolves after the initial sync is completed -- Upgrade to Kotlin 2.0.20 - 
should not cause any issues with users who are still on Kotlin 1.9 -- Upgrade `powersync-sqlite-core` to 0.3.0 - improves incremental sync performance -- Add client sync parameters - which allows you specify sync parameters from the +* Add `waitForFirstSync` function - which resolves after the initial sync is completed +* Upgrade to Kotlin 2.0.20 - should not cause any issues with users who are still on Kotlin 1.9 +* Upgrade `powersync-sqlite-core` to 0.3.0 - improves incremental sync performance +* Add client sync parameters - which allows you specify sync parameters from the client https://docs.powersync.com/usage/sync-rules/advanced-topics/client-parameters-beta ```kotlin @@ -288,25 +289,25 @@ params = params ) ``` -- Add schema validation when schema is generated -- Add warning message if there is a crudItem in the queue that has not yet been synced and after a +* Add schema validation when schema is generated +* Add warning message if there is a crudItem in the queue that has not yet been synced and after a delay rerun the upload ## 1.0.0-BETA2 -- Publish persistence package +* Publish persistence package ## 1.0.0-BETA1 -- Improve API by changing from Builder pattern to simply instantiating the database +* Improve API by changing from Builder pattern to simply instantiating the database `PowerSyncDatabase` E.g. `val db = PowerSyncDatabase(factory, schema)` -- Use callback context in transactions +* Use callback context in transactions E.g. `db.writeTransaction{ ctx -> ctx.execute(...) 
}` -- Removed unnecessary expiredAt field -- Added table max column validation as there is a hard limit of 63 columns -- Moved SQLDelight models to a separate module to reduce export size -- Replaced default Logger with [Kermit Logger](https://kermit.touchlab.co/) which allows users to +* Removed unnecessary expiredAt field +* Added table max column validation as there is a hard limit of 63 columns +* Moved SQLDelight models to a separate module to reduce export size +* Replaced default Logger with [Kermit Logger](https://kermit.touchlab.co/) which allows users to more easily use and/or change Logger settings -- Add `retryDelay` and `crudThrottle` options when setting up database connection -- Changed `_viewNameOverride` to `viewNameOverride` +* Add `retryDelay` and `crudThrottle` options when setting up database connection +* Changed `_viewNameOverride` to `viewNameOverride` \ No newline at end of file From 2911eb71b98f148eace8e7fbcf163a4a9bfe8bd7 Mon Sep 17 00:00:00 2001 From: Yahya JIRARI Date: Thu, 17 Jul 2025 12:06:44 +0200 Subject: [PATCH 3/5] feat(getNextCrudTransactionBatch): revert unwanted formatting changes to CHANGELOG.md --- CHANGELOG.md | 106 +++++++++++++++++++++++++-------------------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2f53cdf..bfc7f2bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,7 @@ * Add `getNextCrudTransactionBatch` method to `PowerSyncDatabase` which allows for fetching a batch of CRUD operations to upload. This is useful for uploading multiple transactions in a single batch. - + ## 1.3.0 * Support tables created outside of PowerSync with the `RawTable` API. @@ -28,7 +28,7 @@ * Add a new sync client implementation written in Rust instead of Kotlin. While this client is still experimental, we intend to make it the default in the future. The main benefit of this client is - faster sync performance, but upcoming features will also require this client. 
We encourage + faster sync performance, but upcoming features will also require this client. We encourage interested users to try it out by opting in to `ExperimentalPowerSyncAPI` and passing options when connecting: ```Kotlin @@ -49,7 +49,7 @@ method = ConnectionMethod.WebSocket() )) ``` -* [Android, JVM] Use version `0.4.0` of `powersync*sqlite*core`. +* [Android, JVM] Use version `0.4.0` of `powersync-sqlite-core`. ## 1.1.1 @@ -72,27 +72,27 @@ * Bump SDK to V1/Stable feature status * Fixed `CrudBatch` `hasMore` always returning false. * Added `triggerImmediately` to `onChange` method. -* Report real*time progress information about downloads through `SyncStatus.downloadProgress`. +* Report real-time progress information about downloads through `SyncStatus.downloadProgress`. * Compose: Add `composeState()` extension method on `SyncStatus`. * [Internal] Added helper method for Swift `PowerSyncException` throwing. -## 1.0.0*BETA32 +## 1.0.0-BETA32 * Added `onChange` method to the PowerSync client. This allows for observing table changes. -* Removed unnecessary `User*Id` header from internal PowerSync service requests. +* Removed unnecessary `User-Id` header from internal PowerSync service requests. * Fix loading native PowerSync extension for Java targets. -## 1.0.0*BETA31 +## 1.0.0-BETA31 * Added helpers for Attachment syncing. * Fix `getNextCrudTransaction()` only returning a single item. -## 1.0.0*BETA30 +## 1.0.0-BETA30 * Fix a deadlock when calling `connect()` immediately after opening a database. - The issue has been introduced in version `1.0.0*BETA29`. + The issue has been introduced in version `1.0.0-BETA29`. -## 1.0.0*BETA29 +## 1.0.0-BETA29 * Fix potential race condition between jobs in `connect()` and `disconnect()`. * [JVM Windows] Fixed PowerSync Extension temporary file deletion error on process shutdown. 
@@ -104,7 +104,7 @@ Sqlite operation failure database is locked attempted to run migration and faile * Fix race condition causing data received during uploads not to be applied. -## 1.0.0*BETA28 +## 1.0.0-BETA28 * Update PowerSync SQLite core extension to 0.3.12. * Added queing protection and warnings when connecting multiple PowerSync clients to the same @@ -119,35 +119,35 @@ Sqlite operation failure database is locked attempted to run migration and faile * Fixed bug where local only, insert only and view name overrides were not applied for schema tables. * The Android SQLite driver now uses - the [Xerial JDBC library](https://github.com/xerial/sqlite*jdbc). This removes the requirement for + the [Xerial JDBC library](https://github.com/xerial/sqlite-jdbc). This removes the requirement for users to add the jitpack Maven repository to their projects. ```diff // settings.gradle.kts example repositories { google() -* maven("https://jitpack.io") { -* content { includeGroup("com.github.requery") } -* } +- maven("https://jitpack.io") { +- content { includeGroup("com.github.requery") } +- } mavenCentral() } ``` -## 1.0.0*BETA27 +## 1.0.0-BETA27 * Improved watch query internals. Added the ability to throttle watched queries. * Fixed `uploading` and `downloading` sync status indicators. -## 1.0.0*BETA26 +## 1.0.0-BETA26 * Support bucket priorities and partial syncs. * Android: Add ProGuard rules to prevent methods called through JNI from being minified or removed. -## 1.0.0*BETA25 +## 1.0.0-BETA25 * JVM: Lower minimum supported version from 17 to 8. -## 1.0.0*BETA24 +## 1.0.0-BETA24 * Improve internal handling of watch queries to avoid issues where updates are not being received due to transaction commits occurring after the query is run. 
@@ -155,29 +155,29 @@ Sqlite operation failure database is locked attempted to run migration and faile driver starting at 1 instead of 0 as in the other drivers/ * Throw and not just catch `CancellationExceptions` in `runWrappedSuspending` -## 1.0.0*BETA23 +## 1.0.0-BETA23 * Make `execute` and `PowerSyncTransaction` functions throwable for Swift -## 1.0.0*BETA22 +## 1.0.0-BETA22 * Fix `updateHasSynced` internal null pointer exception -## 1.0.0*BETA21 +## 1.0.0-BETA21 * Improve error handling for Swift by adding @Throws annotation so errors can be handled in Swift * Throw PowerSync exceptions for all public facing methods -## 1.0.0*BETA20 +## 1.0.0-BETA20 * Add cursor optional functions: `getStringOptional`, `getLongOptional`, `getDoubleOptional`, `getBooleanOptional` and `getBytesOptional` when using the column name which allow for optional return types * Throw errors for invalid column on all cursor functions * `getString`, `getLong`, `getBytes`, `getDouble` and `getBoolean` used with the column name will - now throw an error for non*null values and expect a non optional return type + now throw an error for non-null values and expect a non optional return type -## 1.0.0*BETA19 +## 1.0.0-BETA19 * Allow cursor to get values by column name e.g. `getStringOptional("id")` * BREAKING CHANGE: If you were using `SqlCursor` from SqlDelight previously for your own custom @@ -195,83 +195,83 @@ Sqlite operation failure database is locked attempted to run migration and faile import com.powersync.db.SqlCursor ``` -## 1.0.0*BETA18 +## 1.0.0-BETA18 * BREAKING CHANGE: Move from async sqldelight calls to synchronous calls. This will only affect `readTransaction` and `writeTransaction`where the callback function is no longer asynchronous. 
-## 1.0.0*BETA17 +## 1.0.0-BETA17 * Add fix for Windows using JVM build -## 1.0.0*BETA16 +## 1.0.0-BETA16 * Add `close` method to database methods * Throw when error is a `CancellationError` and remove invalidation for all errors in `streamingSync` catch. -## 1.0.0*BETA15 +## 1.0.0-BETA15 -* Update powersync*sqlite*core to 0.3.8 +* Update powersync-sqlite-core to 0.3.8 * Increase maximum amount of columns from 63 to 1999 -## 1.0.0*BETA14 +## 1.0.0-BETA14 * Add JVM compatibility * Revert previous iOS changes as they resulted in further issues. -## 1.0.0*BETA13 +## 1.0.0-BETA13 * Move iOS database driver to use IO dispatcher which should avoid race conditions and improve performance. -## 1.0.0*BETA12 +## 1.0.0-BETA12 * Use transaction context in `writeTransaction` in `BucketStorageImpl`. -## 1.0.0*BETA11 +## 1.0.0-BETA11 * Update version to fix deployment issue of previous release -## 1.0.0*BETA10 +## 1.0.0-BETA10 * Change Swift package name from `PowerSync` to `PowerSyncKotlin` -## 1.0.0*BETA9 +## 1.0.0-BETA9 -* Re*enable SKIE `SuspendInterop` +* Re-enable SKIE `SuspendInterop` * Move transaction functions out of `PowerSyncTransactionFactory` to avoid threading issues in Swift SDK -## 1.0.0*BETA8 +## 1.0.0-BETA8 * Disable SKIE `SuspendInterop` plugin to fix overriding `suspend` functions in Swift -## 1.0.0*BETA7 +## 1.0.0-BETA7 -* Update supabase connector to use supabase*kt version 3 +* Update supabase connector to use supabase-kt version 3 * Handle Postgres error codes in supabase connector -## 1.0.0*BETA6 +## 1.0.0-BETA6 * Fix Custom Write Checkpoint application logic -## 1.0.0*BETA5 +## 1.0.0-BETA5 * Fix `hasSynced` not updating after `disconnectAndClear` * Fix error being thrown in iOS app launch -## 1.0.0*BETA4 +## 1.0.0-BETA4 * Fix sync status being reset when `update` function is run -## 1.0.0*BETA3 +## 1.0.0-BETA3 -* Add `waitForFirstSync` function * which resolves after the initial sync is completed -* Upgrade to Kotlin 2.0.20 * should not cause any issues 
with users who are still on Kotlin 1.9 -* Upgrade `powersync*sqlite*core` to 0.3.0 * improves incremental sync performance -* Add client sync parameters * which allows you specify sync parameters from the - client https://docs.powersync.com/usage/sync*rules/advanced*topics/client*parameters*beta +* Add `waitForFirstSync` function - which resolves after the initial sync is completed +* Upgrade to Kotlin 2.0.20 - should not cause any issues with users who are still on Kotlin 1.9 +* Upgrade `powersync-sqlite-core` to 0.3.0 - improves incremental sync performance +* Add client sync parameters - which allows you specify sync parameters from the + client https://docs.powersync.com/usage/sync-rules/advanced-topics/client-parameters-beta ```kotlin val params = JsonParam.Map( @@ -292,21 +292,21 @@ params = params * Add warning message if there is a crudItem in the queue that has not yet been synced and after a delay rerun the upload -## 1.0.0*BETA2 +## 1.0.0-BETA2 * Publish persistence package -## 1.0.0*BETA1 +## 1.0.0-BETA1 * Improve API by changing from Builder pattern to simply instantiating the database `PowerSyncDatabase` E.g. `val db = PowerSyncDatabase(factory, schema)` * Use callback context in transactions - E.g. `db.writeTransaction{ ctx *> ctx.execute(...) }` + E.g. `db.writeTransaction{ ctx -> ctx.execute(...) 
}` * Removed unnecessary expiredAt field * Added table max column validation as there is a hard limit of 63 columns * Moved SQLDelight models to a separate module to reduce export size * Replaced default Logger with [Kermit Logger](https://kermit.touchlab.co/) which allows users to more easily use and/or change Logger settings * Add `retryDelay` and `crudThrottle` options when setting up database connection -* Changed `_viewNameOverride` to `viewNameOverride` +* Changed `_viewNameOverride` to `viewNameOverride` \ No newline at end of file From 5d0b1f4872927c66705a98d42fc48e466e54c6ae Mon Sep 17 00:00:00 2001 From: Yahya JIRARI Date: Fri, 18 Jul 2025 16:48:16 +0200 Subject: [PATCH 4/5] feat(getNextCrudTransactionBatch): add ps_crud tx_id & (tx_id, id) index --- .../src/commonMain/sqldelight/com/persistence/Powersync.sq | 3 +++ 1 file changed, 3 insertions(+) diff --git a/persistence/src/commonMain/sqldelight/com/persistence/Powersync.sq b/persistence/src/commonMain/sqldelight/com/persistence/Powersync.sq index da30fd6b..38019fe6 100644 --- a/persistence/src/commonMain/sqldelight/com/persistence/Powersync.sq +++ b/persistence/src/commonMain/sqldelight/com/persistence/Powersync.sq @@ -28,6 +28,9 @@ DELETE FROM ps_crud WHERE id <= ?; -- we can define interal tables as part of the dialect. 
CREATE TABLE IF NOT EXISTS ps_crud (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT, tx_id INTEGER); +CREATE INDEX IF NOT EXISTS idx_ps_crud_tx_id ON ps_crud(tx_id); +CREATE INDEX IF NOT EXISTS idx_ps_crud_tx_id_id ON ps_crud(tx_id, id); + CREATE TABLE ps_buckets( name TEXT PRIMARY KEY, last_applied_op INTEGER NOT NULL DEFAULT 0, From 70bfec7447f08fdcf765805e28463d0e2cdbdc70 Mon Sep 17 00:00:00 2001 From: Yahya JIRARI Date: Fri, 18 Jul 2025 17:07:55 +0200 Subject: [PATCH 5/5] feat(getNextCrudTransactionBatch): use an optional itemLimit instead of transactionLimit, improve performance by using CTEs & window function --- .../kotlin/com/powersync/DatabaseTest.kt | 163 ++++++++++++++++-- .../kotlin/com/powersync/PowerSyncDatabase.kt | 8 +- .../com/powersync/db/PowerSyncDatabaseImpl.kt | 81 ++++++--- 3 files changed, 203 insertions(+), 49 deletions(-) diff --git a/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt b/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt index 4777fe8c..73054ff2 100644 --- a/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt +++ b/core/src/commonIntegrationTest/kotlin/com/powersync/DatabaseTest.kt @@ -22,6 +22,7 @@ import kotlinx.coroutines.withContext import kotlin.test.Test import kotlin.test.assertEquals import kotlin.test.assertNotNull +import kotlin.time.TimeSource @OptIn(ExperimentalKermitApi::class) class DatabaseTest { @@ -503,20 +504,20 @@ class DatabaseTest { ) } - // Test with limit of 2 transactions - var batch = database.getNextCrudTransactionBatch(2) ?: error("Batch should not be null") + // Test with item limit of 3 - should get first transaction (1 item) + second transaction (2 items) + var batch = database.getNextCrudTransactionBatch(limit = 3) ?: error("Batch should not be null") batch.hasMore shouldBe true batch.crud shouldHaveSize 3 // 1 entry from transaction 1 + 2 entries from transaction 2 batch.complete(null) - // Test with limit of 1 transaction - batch = 
database.getNextCrudTransactionBatch(1) ?: error("Batch should not be null") + // Test with item limit of 2 - should get third transaction (1 item) only + batch = database.getNextCrudTransactionBatch(limit = 2) ?: error("Batch should not be null") batch.hasMore shouldBe true batch.crud shouldHaveSize 1 // 1 entry from transaction 3 batch.complete(null) - // Test with large limit that covers remaining transactions - batch = database.getNextCrudTransactionBatch(10) ?: error("Batch should not be null") + // Test with no limit - should get remaining transactions + batch = database.getNextCrudTransactionBatch() ?: error("Batch should not be null") batch.hasMore shouldBe false batch.crud shouldHaveSize 3 // 3 entries from transaction 4 batch.complete(null) @@ -543,13 +544,13 @@ class DatabaseTest { ) // Each NULL tx_id operation should be treated as its own transaction - var batch = database.getNextCrudTransactionBatch(2) ?: error("Batch should not be null") + var batch = database.getNextCrudTransactionBatch(limit = 2) ?: error("Batch should not be null") batch.hasMore shouldBe true batch.crud shouldHaveSize 2 // 2 individual transactions batch.complete(null) // Get the remaining transaction - batch = database.getNextCrudTransactionBatch(10) ?: error("Batch should not be null") + batch = database.getNextCrudTransactionBatch() ?: error("Batch should not be null") batch.hasMore shouldBe false batch.crud shouldHaveSize 1 // 1 remaining transaction batch.complete(null) @@ -576,14 +577,14 @@ class DatabaseTest { listOf("single", "single@example.org"), ) - // Should get the entire large transaction (10 operations) in one batch - var batch = database.getNextCrudTransactionBatch(1) ?: error("Batch should not be null") + // Should get entire large transaction (10 operations) - at least one transaction rule + var batch = database.getNextCrudTransactionBatch(limit = 5) ?: error("Batch should not be null") batch.hasMore shouldBe true batch.crud shouldHaveSize 10 batch.complete(null) 
// Should get the single operation - batch = database.getNextCrudTransactionBatch(1) ?: error("Batch should not be null") + batch = database.getNextCrudTransactionBatch() ?: error("Batch should not be null") batch.hasMore shouldBe false batch.crud shouldHaveSize 1 batch.complete(null) @@ -617,7 +618,7 @@ class DatabaseTest { ) // Operations should be processed in order - val batch = database.getNextCrudTransactionBatch(10) ?: error("Batch should not be null") + val batch = database.getNextCrudTransactionBatch() ?: error("Batch should not be null") batch.hasMore shouldBe false batch.crud shouldHaveSize 4 @@ -640,7 +641,7 @@ class DatabaseTest { } @Test - fun testCrudTransactionBatchZeroLimit() = + fun testCrudTransactionBatchZeroLimit() = databaseTest { // Create some operations database.execute( "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", listOf("a", "a@example.org"), ) - // Zero limit should return null even if operations exist - val batch = database.getNextCrudTransactionBatch(0) + // Item limit of 0 should return null even if operations exist + val batch = database.getNextCrudTransactionBatch(limit = 0) batch shouldBe null } @Test - fun testCrudTransactionBatchShouldCountTransactionsNotOperations() = + fun testCrudTransactionBatchGroupsByTransaction() = databaseTest { // Create a transaction with 3 operations database.writeTransaction { @@ -678,12 +679,138 @@ class DatabaseTest { listOf("single", "single@example.org"), ) - // Request limit of 2 transactions - should get all 4 operations (3 from tx + 1 single) - val batch = database.getNextCrudTransactionBatch(2) ?: error("Batch should not be null") + // Request with no limit - should get all 4 operations (3 from tx + 1 single) + val batch = database.getNextCrudTransactionBatch() ?: error("Batch should not be null") batch.hasMore shouldBe false batch.crud shouldHaveSize 4 batch.complete(null) database.getNextCrudTransactionBatch() shouldBe null } + + @Test + fun testCrudTransactionBatchWithLimit() = + databaseTest
{ + // Create a transaction with 5 operations + database.writeTransaction { + repeat(5) { i -> + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("user$i", "user$i@example.org"), + ) + } + } + + // Create another transaction with 3 operations + database.writeTransaction { + repeat(3) { i -> + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("user2_$i", "user2_$i@example.org"), + ) + } + } + + // Add a single operation (NULL tx_id) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("single", "single@example.org"), + ) + + // Test with item limit of 6 - should get first transaction (5 items) only + var batch = database.getNextCrudTransactionBatch(limit = 6) ?: error("Batch should not be null") + batch.hasMore shouldBe true + batch.crud shouldHaveSize 5 + batch.complete(null) + + // Test with item limit of 4 - should get second transaction (3 items) + single operation (1 item) + batch = database.getNextCrudTransactionBatch(limit = 4) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 4 + batch.complete(null) + + database.getNextCrudTransactionBatch() shouldBe null + } + + @Test + fun testCrudTransactionBatchLimitReturnsAtLeastOneTransaction() = + databaseTest { + // Create a transaction with 10 operations + database.writeTransaction { + repeat(10) { i -> + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("user$i", "user$i@example.org"), + ) + } + } + + // With item limit of 5, should return entire transaction (10 items) - at least one transaction rule, even if it exceeds the limit + val batch = database.getNextCrudTransactionBatch(limit = 5) ?: error("Batch should not be null") + batch.hasMore shouldBe false + batch.crud shouldHaveSize 10 + batch.complete(null) + + database.getNextCrudTransactionBatch() shouldBe null + } + + @Test + fun
testCrudTransactionBatchPerformanceBenchmark() = + databaseTest { + // Create a large number of transactions with varying sizes + val totalOperations = 100_000 + val transactionSizes = listOf(1, 3, 5, 10, 20, 50, 100, 200, 500, 1000) + var operationCount = 0 + + while (operationCount < totalOperations) { + val size = transactionSizes.random() + val remainingOps = totalOperations - operationCount + val actualSize = minOf(size, remainingOps) + + if (actualSize == 1) { + // Single operation (NULL tx_id) + database.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("user$operationCount", "user$operationCount@example.org"), + ) + } else { + // Transaction with multiple operations + database.writeTransaction { + repeat(actualSize) { i -> + it.execute( + "INSERT INTO users (id, name, email) VALUES (uuid(), ?, ?)", + listOf("user${operationCount + i}", "user${operationCount + i}@example.org"), + ) + } + } + } + operationCount += actualSize + } + + val startTime = + kotlin.time.TimeSource.Monotonic + .markNow() + var totalBatches = 0 + var totalOperationsProcessed = 0 + + while (true) { + val batch = database.getNextCrudTransactionBatch(limit = 100) + if (batch == null) break + + totalBatches++ + totalOperationsProcessed += batch.crud.size + batch.complete(null) + } + + val elapsedTime = startTime.elapsedNow() + + totalOperationsProcessed shouldBe totalOperations + + println("Benchmark Results:") + println("Total operations: $totalOperations") + println("Total batches: $totalBatches") + println("Average operations per batch: ${totalOperationsProcessed.toDouble() / totalBatches}") + println("Processing time: ${elapsedTime.inWholeMilliseconds}ms") + println("Operations per second: ${(totalOperations * 1000.0 / elapsedTime.inWholeMilliseconds).toInt()}") + } } diff --git a/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt b/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt index 3de07a8e..18f3f22a 100644 --- 
a/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt +++ b/core/src/commonMain/kotlin/com/powersync/PowerSyncDatabase.kt @@ -147,11 +147,13 @@ public interface PowerSyncDatabase : Queries { * Unlike [getCrudBatch], this groups data by transaction, allowing developers to * upload multiple complete transactions in a single batch operation. * - * @param transactionLimit The maximum number of transactions to include in the batch. - * Default is 5. + * @param limit The maximum number of crud items to include in the batch. + * If null, no item limit is applied. Returns the maximum number of complete + * transactions that fit within the item limit, but always returns at least + * one complete transaction even if its number of operations exceeds the limit. */ @Throws(PowerSyncException::class, CancellationException::class) - public suspend fun getNextCrudTransactionBatch(transactionLimit: Int = 10): CrudBatch? + public suspend fun getNextCrudTransactionBatch(limit: Int? = null): CrudBatch? /** * Convenience method to get the current version of PowerSync. diff --git a/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt b/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt index d30d3547..36b72ca0 100644 --- a/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt +++ b/core/src/commonMain/kotlin/com/powersync/db/PowerSyncDatabaseImpl.kt @@ -309,14 +309,54 @@ internal class PowerSyncDatabaseImpl( } } - override suspend fun getNextCrudTransactionBatch(transactionLimit: Int): CrudBatch? { + override suspend fun getNextCrudTransactionBatch(limit: Int?): CrudBatch? { waitReady() + + if (limit == 0) { + return null + } + return internalDb.readTransaction { transaction -> - // Since tx_id can be null, we can't use a WHERE tx_id < ? 
with transactionLimit + first crud entry tx_id - // So we get all operations and group them by transaction or fall back to an individual transaction if tx_id is null - val allOperations = + val result = transaction.getAll( - "SELECT id, tx_id, data FROM ps_crud ORDER BY id ASC", + """ + WITH all_operations AS ( + -- Compute transaction group once and reuse throughout + SELECT + id, + tx_id, + data, + COALESCE(tx_id, id) as transaction_group + FROM ps_crud + ), + transaction_groups AS ( + SELECT + transaction_group, + MIN(id) as first_operation_id, + COUNT(*) as operation_count + FROM all_operations + GROUP BY transaction_group + ), + transaction_with_running_totals AS ( + SELECT + transaction_group, + first_operation_id, + operation_count, + ROW_NUMBER() OVER (ORDER BY first_operation_id) as transaction_rank, + SUM(operation_count) OVER (ORDER BY first_operation_id ROWS UNBOUNDED PRECEDING) as running_total + FROM transaction_groups + ), + selected_transactions AS ( + SELECT transaction_group + FROM transaction_with_running_totals + WHERE ? IS NULL OR running_total <= ? 
OR transaction_rank = 1 + ) + SELECT ao.id, ao.tx_id, ao.data + FROM all_operations ao + INNER JOIN selected_transactions st ON ao.transaction_group = st.transaction_group + ORDER BY ao.id + """, + listOf(limit?.toLong(), limit?.toLong()), ) { cursor -> CrudEntry.fromRow( CrudRow( @@ -327,39 +367,24 @@ internal class PowerSyncDatabaseImpl( ) } - val result = mutableListOf() - val processedTransactions = mutableSetOf() - var transactionCount = 0 - - for (operation in allOperations) { - if (transactionCount >= transactionLimit) break - - val txId = operation.transactionId - if (txId == null) { - // NULL tx_id operations are individual transactions - result.add(operation) - transactionCount++ - } else if (txId !in processedTransactions) { - val transactionOperations = bucketStorage.getCrudItemsByTransactionId(txId, transaction) - result.addAll(transactionOperations) - processedTransactions.add(txId) - transactionCount++ - } - } - if (result.isEmpty()) { return@readTransaction null } - val hasMore = result.size < allOperations.size - val last = result.last() + val maxOperationId = result.maxOfOrNull { it.clientId } ?: 0 + + val hasMore = + transaction.get( + "SELECT EXISTS(SELECT 1 FROM ps_crud WHERE id > ? LIMIT 1)", + listOf(maxOperationId.toLong()), + ) { it.getLong(0)!! } > 0 return@readTransaction CrudBatch( crud = result, hasMore = hasMore, complete = { writeCheckpoint -> logger.i { "[CrudTransactionBatch::complete] Completing batch with checkpoint $writeCheckpoint" } - handleWriteCheckpoint(last.clientId, writeCheckpoint) + handleWriteCheckpoint(maxOperationId, writeCheckpoint) }, ) }