diff --git a/benchmark/app/src/main/AndroidManifest.xml b/benchmark/app/src/main/AndroidManifest.xml
index b57cd039789..4ed1e78772d 100644
--- a/benchmark/app/src/main/AndroidManifest.xml
+++ b/benchmark/app/src/main/AndroidManifest.xml
@@ -1,7 +1,7 @@
-
+
@@ -17,4 +17,4 @@
-
\ No newline at end of file
+
diff --git a/benchmark/microbenchmark/build.gradle.kts b/benchmark/microbenchmark/build.gradle.kts
index 840feac0957..c36e6887e3f 100644
--- a/benchmark/microbenchmark/build.gradle.kts
+++ b/benchmark/microbenchmark/build.gradle.kts
@@ -65,6 +65,8 @@ dependencies {
androidTestImplementation(libs.benchmark.junit4)
androidTestImplementation(libs.androidx.test.core)
+ androidTestImplementation("com.apollographql.apollo3:apollo-mockserver")
+ androidTestImplementation("com.apollographql.apollo3:apollo-testing-support")
}
configure {
diff --git a/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/ApolloStoreIncubatingTests.kt b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/ApolloStoreIncubatingTests.kt
new file mode 100644
index 00000000000..4d20499b193
--- /dev/null
+++ b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/ApolloStoreIncubatingTests.kt
@@ -0,0 +1,98 @@
+package com.apollographql.apollo3.benchmark
+
+import androidx.benchmark.junit4.BenchmarkRule
+import androidx.benchmark.junit4.measureRepeated
+import androidx.test.platform.app.InstrumentationRegistry
+import com.apollographql.apollo3.api.json.jsonReader
+import com.apollographql.apollo3.api.parseJsonResponse
+import com.apollographql.apollo3.benchmark.Utils.dbName
+import com.apollographql.apollo3.benchmark.Utils.operationBasedQuery
+import com.apollographql.apollo3.benchmark.Utils.resource
+import com.apollographql.apollo3.benchmark.test.R
+import com.apollographql.apollo3.cache.normalized.incubating.ApolloStore
+import com.apollographql.apollo3.cache.normalized.incubating.api.CacheKeyGenerator
+import com.apollographql.apollo3.cache.normalized.incubating.api.CacheResolver
+import com.apollographql.apollo3.cache.normalized.incubating.api.FieldPolicyCacheResolver
+import com.apollographql.apollo3.cache.normalized.incubating.api.MemoryCacheFactory
+import com.apollographql.apollo3.cache.normalized.incubating.api.NormalizedCacheFactory
+import com.apollographql.apollo3.cache.normalized.incubating.api.TypePolicyCacheKeyGenerator
+import com.apollographql.apollo3.cache.normalized.incubating.sql.SqlNormalizedCacheFactory
+import org.junit.Assert
+import org.junit.Rule
+import org.junit.Test
+import java.lang.reflect.Method
+import java.util.concurrent.Executors
+
+class ApolloStoreIncubatingTests {
+ @get:Rule
+ val benchmarkRule = BenchmarkRule()
+
+ @Test
+ fun concurrentReadWritesMemory() {
+ concurrentReadWrites(MemoryCacheFactory())
+ }
+
+ @Test
+ fun concurrentReadWritesSql() {
+ Utils.dbFile.delete()
+ // Pass context explicitly here because androidx.startup fails due to relocation
+ val cacheFactory = SqlNormalizedCacheFactory(InstrumentationRegistry.getInstrumentation().context, dbName)
+ concurrentReadWrites(cacheFactory)
+ }
+
+ @Test
+ fun concurrentReadWritesMemoryThenSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = MemoryCacheFactory().chain(SqlNormalizedCacheFactory(InstrumentationRegistry.getInstrumentation().context, dbName))
+ concurrentReadWrites(cacheFactory)
+ }
+
+ private fun concurrentReadWrites(cacheFactory: NormalizedCacheFactory) {
+ val apolloStore = createApolloStore(cacheFactory)
+ val query = operationBasedQuery
+ val data = query.parseJsonResponse(resource(R.raw.calendar_response_simple).jsonReader()).data!!
+ val threadPool = Executors.newFixedThreadPool(CONCURRENCY)
+ benchmarkRule.measureRepeated {
+ val futures = (1..CONCURRENCY).map {
+ threadPool.submit {
+ // Let each thread execute a few writes/reads
+ repeat(WORK_LOAD) {
+ apolloStore.writeOperation(query, data)
+ val data2 = apolloStore.readOperation(query)
+ Assert.assertEquals(data, data2)
+ }
+ }
+ }
+ // Wait for all threads to finish
+ futures.forEach { it.get() }
+ }
+ }
+
+ private fun createApolloStore(cacheFactory: NormalizedCacheFactory): ApolloStore {
+ return createApolloStoreMethod.invoke(
+ null,
+ cacheFactory,
+ TypePolicyCacheKeyGenerator,
+ FieldPolicyCacheResolver,
+ ) as ApolloStore
+ }
+
+
+ companion object {
+ private const val CONCURRENCY = 10
+ private const val WORK_LOAD = 5
+
+ /**
+ * There doesn't seem to be a way to relocate Kotlin metadata and kotlin_module files so we rely on reflection to call top-level
+ * methods
+ * See https://discuss.kotlinlang.org/t/what-is-the-proper-way-to-repackage-shade-kotlin-dependencies/10869
+ */
+ private val apolloStoreKtClass = Class.forName("com.apollographql.apollo3.cache.normalized.incubating.ApolloStoreKt")
+ private val createApolloStoreMethod: Method = apolloStoreKtClass.getMethod(
+ "ApolloStore",
+ NormalizedCacheFactory::class.java,
+ CacheKeyGenerator::class.java,
+ CacheResolver::class.java,
+ )
+ }
+}
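For readers unfamiliar with the relocation workaround: the reflective call above resolves to the incubating top-level ApolloStore() builder function. A minimal sketch of the equivalent direct call (not part of this patch; it assumes the same three arguments the reflection passes):

    val store: ApolloStore = ApolloStore(
        cacheFactory,
        TypePolicyCacheKeyGenerator,
        FieldPolicyCacheResolver,
    )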
diff --git a/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/ApolloStoreTests.kt b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/ApolloStoreTests.kt
new file mode 100644
index 00000000000..20b1f79f4c2
--- /dev/null
+++ b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/ApolloStoreTests.kt
@@ -0,0 +1,73 @@
+package com.apollographql.apollo3.benchmark
+
+import androidx.benchmark.junit4.BenchmarkRule
+import androidx.benchmark.junit4.measureRepeated
+import com.apollographql.apollo3.api.json.jsonReader
+import com.apollographql.apollo3.api.parseJsonResponse
+import com.apollographql.apollo3.benchmark.Utils.dbName
+import com.apollographql.apollo3.benchmark.Utils.operationBasedQuery
+import com.apollographql.apollo3.benchmark.Utils.resource
+import com.apollographql.apollo3.benchmark.test.R
+import com.apollographql.apollo3.cache.normalized.ApolloStore
+import com.apollographql.apollo3.cache.normalized.api.MemoryCacheFactory
+import com.apollographql.apollo3.cache.normalized.api.NormalizedCacheFactory
+import com.apollographql.apollo3.cache.normalized.sql.SqlNormalizedCacheFactory
+import org.junit.Assert
+import org.junit.Rule
+import org.junit.Test
+import java.util.concurrent.Executors
+
+class ApolloStoreTests {
+ @get:Rule
+ val benchmarkRule = BenchmarkRule()
+
+ @Test
+ fun concurrentReadWritesMemory() {
+ concurrentReadWrites(MemoryCacheFactory())
+ }
+
+ @Test
+ fun concurrentReadWritesSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = SqlNormalizedCacheFactory(dbName)
+ concurrentReadWrites(cacheFactory)
+ }
+
+ @Test
+ fun concurrentReadWritesMemoryThenSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = MemoryCacheFactory().chain(SqlNormalizedCacheFactory(dbName))
+ concurrentReadWrites(cacheFactory)
+ }
+
+ private fun concurrentReadWrites(cacheFactory: NormalizedCacheFactory) {
+ val apolloStore = createApolloStore(cacheFactory)
+ val query = operationBasedQuery
+ val data = query.parseJsonResponse(resource(R.raw.calendar_response_simple).jsonReader()).data!!
+ val threadPool = Executors.newFixedThreadPool(CONCURRENCY)
+ benchmarkRule.measureRepeated {
+ val futures = (1..CONCURRENCY).map {
+ threadPool.submit {
+ // Let each thread execute a few writes/reads
+ repeat(WORK_LOAD) {
+ apolloStore.writeOperation(query, data)
+ val data2 = apolloStore.readOperation(query)
+ Assert.assertEquals(data, data2)
+ }
+ }
+ }
+ // Wait for all threads to finish
+ futures.forEach { it.get() }
+ }
+ }
+
+ private fun createApolloStore(cacheFactory: NormalizedCacheFactory): ApolloStore {
+ return ApolloStore(cacheFactory)
+ }
+
+
+ companion object {
+ private const val CONCURRENCY = 10
+ private const val WORK_LOAD = 5
+ }
+}
diff --git a/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/CacheIncubatingIntegrationTests.kt b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/CacheIncubatingIntegrationTests.kt
new file mode 100644
index 00000000000..2049db227e6
--- /dev/null
+++ b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/CacheIncubatingIntegrationTests.kt
@@ -0,0 +1,153 @@
+package com.apollographql.apollo3.benchmark
+
+import androidx.benchmark.junit4.BenchmarkRule
+import androidx.benchmark.junit4.measureRepeated
+import androidx.test.platform.app.InstrumentationRegistry
+import com.apollographql.apollo3.ApolloClient
+import com.apollographql.apollo3.api.json.jsonReader
+import com.apollographql.apollo3.api.parseJsonResponse
+import com.apollographql.apollo3.benchmark.Utils.dbName
+import com.apollographql.apollo3.benchmark.Utils.operationBasedQuery
+import com.apollographql.apollo3.benchmark.Utils.resource
+import com.apollographql.apollo3.benchmark.test.R
+import com.apollographql.apollo3.cache.normalized.FetchPolicy
+import com.apollographql.apollo3.cache.normalized.fetchPolicy
+import com.apollographql.apollo3.cache.normalized.incubating.ApolloStore
+import com.apollographql.apollo3.cache.normalized.incubating.api.CacheKeyGenerator
+import com.apollographql.apollo3.cache.normalized.incubating.api.CacheResolver
+import com.apollographql.apollo3.cache.normalized.incubating.api.FieldPolicyCacheResolver
+import com.apollographql.apollo3.cache.normalized.incubating.api.MemoryCacheFactory
+import com.apollographql.apollo3.cache.normalized.incubating.api.NormalizedCacheFactory
+import com.apollographql.apollo3.cache.normalized.incubating.api.TypePolicyCacheKeyGenerator
+import com.apollographql.apollo3.cache.normalized.incubating.sql.SqlNormalizedCacheFactory
+import com.apollographql.apollo3.mockserver.MockRequestBase
+import com.apollographql.apollo3.mockserver.MockResponse
+import com.apollographql.apollo3.mockserver.MockServer
+import com.apollographql.apollo3.mockserver.MockServerHandler
+import com.apollographql.apollo3.testing.MapTestNetworkTransport
+import com.apollographql.apollo3.testing.registerTestResponse
+import kotlinx.coroutines.joinAll
+import kotlinx.coroutines.launch
+import kotlinx.coroutines.runBlocking
+import org.junit.Rule
+import org.junit.Test
+import java.lang.reflect.Method
+
+class CacheIncubatingIntegrationTests {
+ @get:Rule
+ val benchmarkRule = BenchmarkRule()
+
+ @Test
+ fun concurrentQueriesTestNetworkTransportMemory() {
+ concurrentQueries(MemoryCacheFactory(), withMockServer = false)
+ }
+
+ @Test
+ fun concurrentQueriesTestNetworkTransportSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = SqlNormalizedCacheFactory(InstrumentationRegistry.getInstrumentation().context, dbName)
+ concurrentQueries(cacheFactory, withMockServer = false)
+ }
+
+ @Test
+ fun concurrentQueriesTestNetworkTransportMemoryThenSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = MemoryCacheFactory().chain(SqlNormalizedCacheFactory(InstrumentationRegistry.getInstrumentation().context, dbName))
+ concurrentQueries(cacheFactory, withMockServer = false)
+ }
+
+
+ private fun concurrentQueries(cacheFactory: NormalizedCacheFactory, withMockServer: Boolean) {
+ val mockServer = MockServer.Builder()
+ .handler(
+ object : MockServerHandler {
+ private val mockResponse = MockResponse.Builder()
+ .statusCode(200)
+ .body(resource(R.raw.calendar_response_simple).readByteString())
+ .build()
+
+ override fun handle(request: MockRequestBase): MockResponse {
+ return mockResponse
+ }
+ }
+ )
+ .build()
+
+ val client = ApolloClient.Builder()
+ .let {
+ if (withMockServer) {
+ it.serverUrl(runBlocking { mockServer.url() })
+ } else {
+ it.networkTransport(MapTestNetworkTransport())
+ }
+ }
+ .store(createApolloStore(cacheFactory))
+ .build()
+ if (!withMockServer) {
+ client.registerTestResponse(operationBasedQuery, operationBasedQuery.parseJsonResponse(resource(R.raw.calendar_response_simple).jsonReader()).data!!)
+ }
+
+ benchmarkRule.measureRepeated {
+ runBlocking {
+ (1..CONCURRENCY).map {
+ launch {
+ // Let each job execute a few queries
+ repeat(WORK_LOAD) {
+ client.query(operationBasedQuery).fetchPolicy(FetchPolicy.NetworkOnly).execute().dataOrThrow()
+ client.query(operationBasedQuery).fetchPolicy(FetchPolicy.CacheOnly).execute().dataOrThrow()
+ }
+ }
+ }
+ // Wait for all jobs to finish
+ .joinAll()
+ }
+ }
+ }
+
+ private fun createApolloStore(cacheFactory: NormalizedCacheFactory): ApolloStore {
+ return createApolloStoreMethod.invoke(
+ null,
+ cacheFactory,
+ TypePolicyCacheKeyGenerator,
+ FieldPolicyCacheResolver,
+ ) as ApolloStore
+ }
+
+
+ companion object {
+ private const val CONCURRENCY = 10
+ private const val WORK_LOAD = 8
+
+ /**
+ * There doesn't seem to be a way to relocate Kotlin metadata and kotlin_module files so we rely on reflection to call top-level
+ * methods
+ * See https://discuss.kotlinlang.org/t/what-is-the-proper-way-to-repackage-shade-kotlin-dependencies/10869
+ */
+ private val apolloStoreKtClass = Class.forName("com.apollographql.apollo3.cache.normalized.incubating.ApolloStoreKt")
+ private val createApolloStoreMethod: Method = apolloStoreKtClass.getMethod(
+ "ApolloStore",
+ NormalizedCacheFactory::class.java,
+ CacheKeyGenerator::class.java,
+ CacheResolver::class.java,
+ )
+
+ private val NormalizedCacheClass = Class.forName("com.apollographql.apollo3.cache.normalized.incubating.NormalizedCache")
+ private val storeMethod: Method = NormalizedCacheClass.getMethod(
+ "store",
+ ApolloClient.Builder::class.java,
+ ApolloStore::class.java,
+ Boolean::class.java,
+ )
+
+ private fun ApolloClient.Builder.store(store: ApolloStore): ApolloClient.Builder {
+ return storeMethod.invoke(
+ null,
+ this,
+ store,
+ false,
+ ) as ApolloClient.Builder
+ }
+ }
+}
+
+
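As above, the reflective store() helper exists only because of the relocation: it mirrors the ApolloClient.Builder.store() extension that the non-incubating CacheIntegrationTests below imports directly, with the trailing false presumably being the write-to-cache-asynchronously flag. A sketch of the direct equivalent (not part of this patch):

    val client = ApolloClient.Builder()
        .networkTransport(MapTestNetworkTransport())
        .store(createApolloStore(cacheFactory))
        .build()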
diff --git a/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/CacheIntegrationTests.kt b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/CacheIntegrationTests.kt
new file mode 100644
index 00000000000..a088efaed44
--- /dev/null
+++ b/benchmark/microbenchmark/src/androidTest/java/com/apollographql/apollo3/benchmark/CacheIntegrationTests.kt
@@ -0,0 +1,113 @@
+package com.apollographql.apollo3.benchmark
+
+import androidx.benchmark.junit4.BenchmarkRule
+import androidx.benchmark.junit4.measureRepeated
+import com.apollographql.apollo3.ApolloClient
+import com.apollographql.apollo3.api.json.jsonReader
+import com.apollographql.apollo3.api.parseJsonResponse
+import com.apollographql.apollo3.benchmark.Utils.dbName
+import com.apollographql.apollo3.benchmark.Utils.operationBasedQuery
+import com.apollographql.apollo3.benchmark.Utils.resource
+import com.apollographql.apollo3.benchmark.test.R
+import com.apollographql.apollo3.cache.normalized.ApolloStore
+import com.apollographql.apollo3.cache.normalized.FetchPolicy
+import com.apollographql.apollo3.cache.normalized.api.MemoryCacheFactory
+import com.apollographql.apollo3.cache.normalized.api.NormalizedCacheFactory
+import com.apollographql.apollo3.cache.normalized.fetchPolicy
+import com.apollographql.apollo3.cache.normalized.sql.SqlNormalizedCacheFactory
+import com.apollographql.apollo3.cache.normalized.store
+import com.apollographql.apollo3.mockserver.MockRequestBase
+import com.apollographql.apollo3.mockserver.MockResponse
+import com.apollographql.apollo3.mockserver.MockServer
+import com.apollographql.apollo3.mockserver.MockServerHandler
+import com.apollographql.apollo3.testing.MapTestNetworkTransport
+import com.apollographql.apollo3.testing.registerTestResponse
+import kotlinx.coroutines.joinAll
+import kotlinx.coroutines.launch
+import kotlinx.coroutines.runBlocking
+import org.junit.Rule
+import org.junit.Test
+
+class CacheIntegrationTests {
+ @get:Rule
+ val benchmarkRule = BenchmarkRule()
+
+ @Test
+ fun concurrentQueriesTestNetworkTransportMemory() {
+ concurrentQueries(MemoryCacheFactory(), withMockServer = false)
+ }
+
+ @Test
+ fun concurrentQueriesTestNetworkTransportSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = SqlNormalizedCacheFactory(dbName)
+ concurrentQueries(cacheFactory, withMockServer = false)
+ }
+
+ @Test
+ fun concurrentQueriesTestNetworkTransportMemoryThenSql() {
+ Utils.dbFile.delete()
+ val cacheFactory = MemoryCacheFactory().chain(SqlNormalizedCacheFactory(dbName))
+ concurrentQueries(cacheFactory, withMockServer = false)
+ }
+
+
+ private fun concurrentQueries(cacheFactory: NormalizedCacheFactory, withMockServer: Boolean) {
+ val mockServer = MockServer.Builder()
+ .handler(
+ object : MockServerHandler {
+ private val mockResponse = MockResponse.Builder()
+ .statusCode(200)
+ .body(resource(R.raw.calendar_response_simple).readByteString())
+ .build()
+
+ override fun handle(request: MockRequestBase): MockResponse {
+ return mockResponse
+ }
+ }
+ )
+ .build()
+
+ val client = ApolloClient.Builder()
+ .let {
+ if (withMockServer) {
+ it.serverUrl(runBlocking { mockServer.url() })
+ } else {
+ it.networkTransport(MapTestNetworkTransport())
+ }
+ }
+ .store(createApolloStore(cacheFactory))
+ .build()
+ if (!withMockServer) {
+ client.registerTestResponse(operationBasedQuery, operationBasedQuery.parseJsonResponse(resource(R.raw.calendar_response_simple).jsonReader()).data!!)
+ }
+
+ benchmarkRule.measureRepeated {
+ runBlocking {
+ (1..CONCURRENCY).map {
+ launch {
+ // Let each job execute a few queries
+ repeat(WORK_LOAD) {
+ client.query(operationBasedQuery).fetchPolicy(FetchPolicy.NetworkOnly).execute().dataOrThrow()
+ client.query(operationBasedQuery).fetchPolicy(FetchPolicy.CacheOnly).execute().dataOrThrow()
+ }
+ }
+ }
+ // Wait for all jobs to finish
+ .joinAll()
+ }
+ }
+ }
+
+ private fun createApolloStore(cacheFactory: NormalizedCacheFactory): ApolloStore {
+ return ApolloStore(cacheFactory)
+ }
+
+
+ companion object {
+ private const val CONCURRENCY = 10
+ private const val WORK_LOAD = 8
+ }
+}
+
+
diff --git a/benchmark/microbenchmark/src/androidTest/res/raw/calendar_response_simple.json b/benchmark/microbenchmark/src/androidTest/res/raw/calendar_response_simple.json
new file mode 100644
index 00000000000..e9ec48ab27e
--- /dev/null
+++ b/benchmark/microbenchmark/src/androidTest/res/raw/calendar_response_simple.json
@@ -0,0 +1,112 @@
+{
+ "data": {
+ "items": {
+ "edges": [
+ {
+ "id": "a3997cde-a335-4752-b5fe-6cb625066c30",
+ "node": {
+ "__typename": "Item",
+ "id": "a3997cde-a335-4752-b5fe-6cb625066c30",
+ "title": "Holiday - Tom Cruise",
+ "type": {
+ "id": "Event",
+ "node": {
+ "__typename": "ItemType",
+ "id": "Event",
+ "name": "Event",
+ "defaultCategory": {
+ "id": "General",
+ "node": {
+ "__typename": "Category",
+ "id": "General",
+ "name": "General",
+ "icon": {
+ "id": "General",
+ "node": {
+ "__typename": "Icon",
+ "id": "General",
+ "name": "General"
+ }
+ },
+ "primaryColor": "Gray400",
+ "secondaryColor": "Gray200"
+ }
+ },
+ "defaultIcon": {
+ "id": "Calendar",
+ "node": {
+ "__typename": "Icon",
+ "id": "Calendar",
+ "name": "Calendar"
+ }
+ }
+ }
+ },
+ "icon": {
+ "id": "Beach",
+ "node": {
+ "__typename": "Icon",
+ "id": "Beach",
+ "name": "Beach"
+ }
+ },
+ "category": {
+ "id": "Work",
+ "node": {
+ "__typename": "Category",
+ "id": "Work",
+ "name": "Work",
+ "icon": {
+ "id": "Suitcase",
+ "node": {
+ "__typename": "Icon",
+ "id": "Suitcase",
+ "name": "Suitcase"
+ }
+ },
+ "primaryColor": "BlueMedium",
+ "secondaryColor": "BlueBright"
+ }
+ },
+ "start": {
+ "__typename": "DateTimeInfo",
+ "date": "2022-05-02",
+ "dateTime": null,
+ "timeZone": null
+ },
+ "end": {
+ "__typename": "DateTimeInfo",
+ "date": "2022-05-03",
+ "dateTime": null,
+ "timeZone": null
+ },
+ "series": null,
+ "calendar": {
+ "id": "7eac2ed6-24f6-4fbc-ab30-80effff39ae9",
+ "node": {
+ "__typename": "Calendar",
+ "id": "7eac2ed6-24f6-4fbc-ab30-80effff39ae9",
+ "name": "Team Holidays",
+ "canCreate": false,
+ "provider": {
+ "node": {
+ "__typename": "CalendarProvider",
+ "id": "cc8e4c28-f178-11ec-8ea0-0242ac120002",
+ "type": "google",
+ "displayName": "Google",
+ "username": "someone@somewhere.com"
+ }
+ }
+ }
+ }
+ }
+ }
+ ],
+ "pageInfo": {
+ "__typename": "PageInfo",
+ "endCursor": "c4d77372-135a-4cfe-8370-81e37d81347b",
+ "hasNextPage": true
+ }
+ }
+ }
+}
diff --git a/benchmark/microbenchmark/src/main/AndroidManifest.xml b/benchmark/microbenchmark/src/main/AndroidManifest.xml
index 72637df10df..898a2280291 100644
--- a/benchmark/microbenchmark/src/main/AndroidManifest.xml
+++ b/benchmark/microbenchmark/src/main/AndroidManifest.xml
@@ -5,9 +5,11 @@
+
+
-
\ No newline at end of file
+
diff --git a/libraries/apollo-normalized-cache-api-incubating/build.gradle.kts b/libraries/apollo-normalized-cache-api-incubating/build.gradle.kts
index ee7519d2266..7de9e274ac5 100644
--- a/libraries/apollo-normalized-cache-api-incubating/build.gradle.kts
+++ b/libraries/apollo-normalized-cache-api-incubating/build.gradle.kts
@@ -15,6 +15,9 @@ kotlin {
api(project(":apollo-mpp-utils"))
implementation(libs.okio)
api(libs.uuid)
+ implementation(libs.atomicfu.get().toString()) {
+ because("Use of ReentrantLock for Apple (we don't use the gradle plugin rewrite)")
+ }
}
}
}
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-apple.kt b/libraries/apollo-normalized-cache-api-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-apple.kt
deleted file mode 100644
index 377ecb6f8fd..00000000000
--- a/libraries/apollo-normalized-cache-api-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-apple.kt
+++ /dev/null
@@ -1,5 +0,0 @@
-package com.apollographql.apollo3.cache.normalized.api.internal
-
-internal actual class CacheLock actual constructor() {
- actual fun <T> lock(block: () -> T): T = block()
-}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt b/libraries/apollo-normalized-cache-api-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
similarity index 78%
rename from libraries/apollo-normalized-cache-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
rename to libraries/apollo-normalized-cache-api-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
index 91b29293da3..3e6c9bb4415 100644
--- a/libraries/apollo-normalized-cache-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/appleMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
@@ -1,10 +1,10 @@
-package com.apollographql.apollo3.cache.normalized.internal
+package com.apollographql.apollo3.cache.normalized.api.internal
import kotlinx.atomicfu.locks.ReentrantLock
import kotlinx.atomicfu.locks.reentrantLock
import kotlinx.atomicfu.locks.withLock
-internal actual class Lock {
+actual class Lock {
private val lock: ReentrantLock = reentrantLock()
  actual fun <T> read(block: () -> T): T {
@@ -14,4 +14,4 @@ internal actual class Lock {
  actual fun <T> write(block: () -> T): T {
return lock.withLock(block)
}
-}
+}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt
index 1bf0e933b31..2edfc8487bf 100644
--- a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/MemoryCache.kt
@@ -1,7 +1,7 @@
package com.apollographql.apollo3.cache.normalized.api
import com.apollographql.apollo3.annotations.ApolloExperimental
-import com.apollographql.apollo3.cache.normalized.api.internal.CacheLock
+import com.apollographql.apollo3.cache.normalized.api.internal.Lock
import com.apollographql.apollo3.cache.normalized.api.internal.LruCache
import com.apollographql.apollo3.mpp.currentTimeMillis
import okio.internal.commonAsUtf8ToByteArray
@@ -21,24 +21,16 @@ class MemoryCache(
private val maxSizeBytes: Int = Int.MAX_VALUE,
private val expireAfterMillis: Long = -1,
) : NormalizedCache() {
- /**
- * A lock that is only used during read accesses on the JVM because
- * reads also write in order to:
- * - maintain the LRU order
- * - update the memory cache from the downstream caches
- *
- * write accesses are already locked by a higher level ReadWrite lock
- */
- private val lock = CacheLock()
+ private val lock = Lock()
  private val lruCache = LruCache<String, CacheEntry>(maxSize = maxSizeBytes) { key, cacheEntry ->
key.commonAsUtf8ToByteArray().size + (cacheEntry?.sizeInBytes ?: 0)
}
val size: Int
- get() = lruCache.size()
+ get() = lock.read { lruCache.size() }
- override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? = lock.lock {
+ override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? = lock.write {
val cacheEntry = lruCache[key]?.also { cacheEntry ->
if (cacheEntry.isExpired || cacheHeaders.hasHeader(ApolloCacheHeaders.EVICT_AFTER_READ)) {
lruCache.remove(key)
@@ -58,24 +50,28 @@ class MemoryCache(
}
override fun clearAll() {
- lruCache.clear()
- nextCache?.clearAll()
+ lock.write {
+ lruCache.clear()
+ nextCache?.clearAll()
+ }
}
override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
- val cacheEntry = lruCache.remove(cacheKey.key)
+ return lock.write {
+ val cacheEntry = lruCache.remove(cacheKey.key)
- if (cascade && cacheEntry != null) {
- for (cacheReference in cacheEntry.record.referencedFields()) {
- remove(CacheKey(cacheReference.key), true)
+ if (cascade && cacheEntry != null) {
+ for (cacheReference in cacheEntry.record.referencedFields()) {
+ remove(CacheKey(cacheReference.key), true)
+ }
}
- }
- val chainRemoved = nextCache?.remove(cacheKey, cascade) ?: false
- return cacheEntry != null || chainRemoved
+ val chainRemoved = nextCache?.remove(cacheKey, cascade) ?: false
+ cacheEntry != null || chainRemoved
+ }
}
- override fun remove(pattern: String): Int {
+ override fun remove(pattern: String): Int = lock.write {
val regex = patternToRegex(pattern)
var total = 0
val keys = HashSet(lruCache.keys()) // local copy to avoid concurrent modification
@@ -87,7 +83,7 @@ class MemoryCache(
}
val chainRemoved = nextCache?.remove(pattern) ?: 0
- return total + chainRemoved
+ total + chainRemoved
}
  override fun merge(record: Record, cacheHeaders: CacheHeaders): Set<String> {
@@ -103,24 +99,24 @@ class MemoryCache(
if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
return emptySet()
}
-
- val oldRecord = loadRecord(record.key, cacheHeaders)
- val changedKeys = if (oldRecord == null) {
- lruCache[record.key] = CacheEntry(
- record = record,
- expireAfterMillis = expireAfterMillis
- )
- record.fieldKeys()
- } else {
- val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record, newDate = null)
- lruCache[record.key] = CacheEntry(
- record = mergedRecord,
- expireAfterMillis = expireAfterMillis
- )
- changedKeys
+ return lock.write {
+ val oldRecord = loadRecord(record.key, cacheHeaders)
+ val changedKeys = if (oldRecord == null) {
+ lruCache[record.key] = CacheEntry(
+ record = record,
+ expireAfterMillis = expireAfterMillis
+ )
+ record.fieldKeys()
+ } else {
+ val (mergedRecord, changedKeys) = recordMerger.merge(existing = oldRecord, incoming = record, newDate = null)
+ lruCache[record.key] = CacheEntry(
+ record = mergedRecord,
+ expireAfterMillis = expireAfterMillis
+ )
+ changedKeys
+ }
+ changedKeys + nextCache?.merge(record, cacheHeaders, recordMerger).orEmpty()
}
-
- return changedKeys + nextCache?.merge(record, cacheHeaders, recordMerger).orEmpty()
}
@ApolloExperimental
@@ -128,17 +124,17 @@ class MemoryCache(
if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
return emptySet()
}
- return records.flatMap { record -> merge(record, cacheHeaders, recordMerger) }.toSet()
+ return lock.write { records.flatMap { record -> merge(record, cacheHeaders, recordMerger) } }.toSet()
}
  override fun dump(): Map<KClass<*>, Map<String, Record>> {
- return mapOf(
- this::class to lruCache.dump().mapValues { (_, entry) -> entry.record }
- ) + nextCache?.dump().orEmpty()
+ return lock.read {
+ mapOf(this::class to lruCache.dump().mapValues { (_, entry) -> entry.record }) + nextCache?.dump().orEmpty()
+ }
}
internal fun clearCurrentCache() {
- lruCache.clear()
+ lock.write { lruCache.clear() }
}
private class CacheEntry(
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock.kt
deleted file mode 100644
index d933b360ed6..00000000000
--- a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock.kt
+++ /dev/null
@@ -1,5 +0,0 @@
-package com.apollographql.apollo3.cache.normalized.api.internal
-
-internal expect class CacheLock() {
- fun <T> lock(block: () -> T): T
-}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
similarity index 62%
rename from libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
rename to libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
index 374d06e01f3..7f759e9fdbb 100644
--- a/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
@@ -1,6 +1,6 @@
-package com.apollographql.apollo3.cache.normalized.internal
+package com.apollographql.apollo3.cache.normalized.api.internal
-import kotlinx.atomicfu.locks.ReentrantLock
+import com.apollographql.apollo3.annotations.ApolloInternal
/**
* A lock with read/write semantics where possible.
@@ -8,7 +8,8 @@ import kotlinx.atomicfu.locks.ReentrantLock
* - uses Java's `ReentrantReadWriteLock` on the JVM
* - uses AtomicFu's [ReentrantLock] on Native (read and write are not distinguished)
*/
-internal expect class Lock() {
+@ApolloInternal
+expect class Lock() {
  fun <T> read(block: () -> T): T
  fun <T> write(block: () -> T): T
-}
+}
\ No newline at end of file
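For context, a minimal usage sketch of this expect/actual Lock (hypothetical caller, not part of this patch; MemoryCache and OptimisticCache below follow the same pattern):

    class RecordHolder {
      private val lock = Lock()
      private val records = mutableMapOf<String, Record>()

      fun load(key: String): Record? = lock.read { records[key] }
      fun store(record: Record) = lock.write { records[record.key] = record }
    }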
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticCache.kt b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticCache.kt
index 2275c705708..2c2948d59ee 100644
--- a/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticCache.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/OptimisticCache.kt
@@ -14,115 +14,130 @@ import kotlin.reflect.KClass
@ApolloInternal
class OptimisticCache : NormalizedCache() {
+ private val lock = Lock()
  private val recordJournals = mutableMapOf<String, RecordJournal>()
override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? {
- return try {
- val nonOptimisticRecord = nextCache?.loadRecord(key, cacheHeaders)
- nonOptimisticRecord.mergeJournalRecord(key)
- } catch (ignore: Exception) {
- null
+ return lock.read {
+ try {
+ val nonOptimisticRecord = nextCache?.loadRecord(key, cacheHeaders)
+ nonOptimisticRecord.mergeJournalRecord(key)
+ } catch (ignore: Exception) {
+ null
+ }
}
}
  override fun loadRecords(keys: Collection<String>, cacheHeaders: CacheHeaders): Collection<Record> {
- val nonOptimisticRecords = nextCache?.loadRecords(keys, cacheHeaders)?.associateBy { it.key } ?: emptyMap()
- return keys.mapNotNull { key ->
- nonOptimisticRecords[key].mergeJournalRecord(key)
+ return lock.read {
+ val nonOptimisticRecords = nextCache?.loadRecords(keys, cacheHeaders)?.associateBy { it.key } ?: emptyMap()
+ keys.mapNotNull { key ->
+ nonOptimisticRecords[key].mergeJournalRecord(key)
+ }
}
}
  override fun merge(record: Record, cacheHeaders: CacheHeaders): Set<String> {
- return nextCache?.merge(record, cacheHeaders) ?: emptySet()
+ return lock.write { nextCache?.merge(record, cacheHeaders) } ?: emptySet()
}
  override fun merge(records: Collection<Record>, cacheHeaders: CacheHeaders): Set<String> {
- return nextCache?.merge(records, cacheHeaders) ?: emptySet()
+ return lock.write { nextCache?.merge(records, cacheHeaders) } ?: emptySet()
}
@ApolloExperimental
  override fun merge(record: Record, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
- return nextCache?.merge(record, cacheHeaders, recordMerger) ?: emptySet()
+ return lock.write { nextCache?.merge(record, cacheHeaders, recordMerger) } ?: emptySet()
}
@ApolloExperimental
  override fun merge(records: Collection<Record>, cacheHeaders: CacheHeaders, recordMerger: RecordMerger): Set<String> {
- return nextCache?.merge(records, cacheHeaders, recordMerger) ?: emptySet()
+ return lock.write { nextCache?.merge(records, cacheHeaders, recordMerger) } ?: emptySet()
}
override fun clearAll() {
- recordJournals.clear()
- nextCache?.clearAll()
+ lock.write {
+ recordJournals.clear()
+ nextCache?.clearAll()
+ }
}
override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
- var result: Boolean = nextCache?.remove(cacheKey, cascade) ?: false
-
- val recordJournal = recordJournals[cacheKey.key]
- if (recordJournal != null) {
- recordJournals.remove(cacheKey.key)
- result = true
- if (cascade) {
- for (cacheReference in recordJournal.current.referencedFields()) {
- result = result && remove(CacheKey(cacheReference.key), true)
+ return lock.write {
+ var result: Boolean = nextCache?.remove(cacheKey, cascade) ?: false
+
+ val recordJournal = recordJournals[cacheKey.key]
+ if (recordJournal != null) {
+ recordJournals.remove(cacheKey.key)
+ result = true
+ if (cascade) {
+ for (cacheReference in recordJournal.current.referencedFields()) {
+ result = result && remove(CacheKey(cacheReference.key), true)
+ }
}
}
+ result
}
- return result
}
override fun remove(pattern: String): Int {
val regex = patternToRegex(pattern)
var total = 0
- val iterator = recordJournals.iterator()
- while(iterator.hasNext()) {
- val entry = iterator.next()
- if (regex.matches(entry.key)) {
- iterator.remove()
- total++
+ return lock.write {
+ val iterator = recordJournals.iterator()
+ while (iterator.hasNext()) {
+ val entry = iterator.next()
+ if (regex.matches(entry.key)) {
+ iterator.remove()
+ total++
+ }
}
- }
- val chainRemoved = nextCache?.remove(pattern) ?: 0
- return total + chainRemoved
+ val chainRemoved = nextCache?.remove(pattern) ?: 0
+ total + chainRemoved
+ }
}
  fun addOptimisticUpdates(recordSet: Collection<Record>): Set<String> {
- return recordSet.flatMap {
- addOptimisticUpdate(it)
+ return lock.write {
+ recordSet.flatMap {
+ addOptimisticUpdate(it)
+ }
}.toSet()
}
  fun addOptimisticUpdate(record: Record): Set<String> {
- val journal = recordJournals[record.key]
- return if (journal == null) {
- recordJournals[record.key] = RecordJournal(record)
- record.fieldKeys()
- } else {
- journal.addPatch(record)
+ return lock.write {
+ val journal = recordJournals[record.key]
+ if (journal == null) {
+ recordJournals[record.key] = RecordJournal(record)
+ record.fieldKeys()
+ } else {
+ journal.addPatch(record)
+ }
}
}
  fun removeOptimisticUpdates(mutationId: Uuid): Set<String> {
- val changedCacheKeys = mutableSetOf<String>()
-
- val iterator = recordJournals.iterator()
- while(iterator.hasNext()) {
- val entry = iterator.next()
- val result = entry.value.removePatch(mutationId)
- changedCacheKeys.addAll(result.changedKeys)
- if (result.isEmpty) {
- iterator.remove()
+ return lock.write {
+ val changedCacheKeys = mutableSetOf<String>()
+ val iterator = recordJournals.iterator()
+ while (iterator.hasNext()) {
+ val entry = iterator.next()
+ val result = entry.value.removePatch(mutationId)
+ changedCacheKeys.addAll(result.changedKeys)
+ if (result.isEmpty) {
+ iterator.remove()
+ }
}
+ changedCacheKeys
}
-
- return changedCacheKeys
}
  override fun dump(): Map<KClass<*>, Map<String, Record>> {
- return mapOf(
- this::class to recordJournals.mapValues { (_, journal) -> journal.current }
- ) + nextCache?.dump().orEmpty()
+ return lock.read {
+ mapOf(this::class to recordJournals.mapValues { (_, journal) -> journal.current }) + nextCache?.dump().orEmpty()
+ }
}
private fun Record?.mergeJournalRecord(key: String): Record? {
@@ -136,7 +151,7 @@ class OptimisticCache : NormalizedCache() {
private class RemovalResult(
  val changedKeys: Set<String>,
- val isEmpty: Boolean
+ val isEmpty: Boolean,
)
private class RecordJournal(record: Record) {
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-js.kt b/libraries/apollo-normalized-cache-api-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-js.kt
deleted file mode 100644
index 377ecb6f8fd..00000000000
--- a/libraries/apollo-normalized-cache-api-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-js.kt
+++ /dev/null
@@ -1,5 +0,0 @@
-package com.apollographql.apollo3.cache.normalized.api.internal
-
-internal actual class CacheLock actual constructor() {
- actual fun <T> lock(block: () -> T): T = block()
-}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt b/libraries/apollo-normalized-cache-api-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
similarity index 59%
rename from libraries/apollo-normalized-cache-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
rename to libraries/apollo-normalized-cache-api-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
index 28b7504bb47..bf9e573ad98 100644
--- a/libraries/apollo-normalized-cache-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/jsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
@@ -1,6 +1,6 @@
-package com.apollographql.apollo3.cache.normalized.internal
+package com.apollographql.apollo3.cache.normalized.api.internal
-internal actual class Lock {
+actual class Lock {
  actual fun <T> read(block: () -> T): T {
return block()
}
@@ -8,4 +8,4 @@ internal actual class Lock {
  actual fun <T> write(block: () -> T): T {
return block()
}
-}
+}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-jvm.kt b/libraries/apollo-normalized-cache-api-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-jvm.kt
deleted file mode 100644
index c7e8f9e4be2..00000000000
--- a/libraries/apollo-normalized-cache-api-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock-jvm.kt
+++ /dev/null
@@ -1,9 +0,0 @@
-package com.apollographql.apollo3.cache.normalized.api.internal
-
-internal actual class CacheLock actual constructor() {
- actual fun <T> lock(block: () -> T): T {
- return synchronized(this) {
- block()
- }
- }
-}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt b/libraries/apollo-normalized-cache-api-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
similarity index 79%
rename from libraries/apollo-normalized-cache-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
rename to libraries/apollo-normalized-cache-api-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
index a1355087d52..ab34aacfbe3 100644
--- a/libraries/apollo-normalized-cache-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/jvmMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
@@ -1,10 +1,10 @@
-package com.apollographql.apollo3.cache.normalized.internal
+package com.apollographql.apollo3.cache.normalized.api.internal
import java.util.concurrent.locks.ReentrantReadWriteLock
import kotlin.concurrent.read
import kotlin.concurrent.write
-internal actual class Lock {
+actual class Lock {
private val lock = ReentrantReadWriteLock()
  actual fun <T> read(block: () -> T): T {
@@ -18,4 +18,4 @@ internal actual class Lock {
block()
}
}
-}
+}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-api-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock.wasmJs.kt b/libraries/apollo-normalized-cache-api-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock.wasmJs.kt
deleted file mode 100644
index de9fe323bdd..00000000000
--- a/libraries/apollo-normalized-cache-api-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/-cache-lock.wasmJs.kt
+++ /dev/null
@@ -1,7 +0,0 @@
-package com.apollographql.apollo3.cache.normalized.api.internal
-
-internal actual class CacheLock actual constructor() {
- actual fun <T> lock(block: () -> T): T {
- return block()
- }
-}
\ No newline at end of file
diff --git a/libraries/apollo-normalized-cache-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.wasmJs.kt b/libraries/apollo-normalized-cache-api-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
similarity index 75%
rename from libraries/apollo-normalized-cache-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.wasmJs.kt
rename to libraries/apollo-normalized-cache-api-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
index 9b0e0d03f14..1628a9478ab 100644
--- a/libraries/apollo-normalized-cache-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/Lock.wasmJs.kt
+++ b/libraries/apollo-normalized-cache-api-incubating/src/wasmJsMain/kotlin/com/apollographql/apollo3/cache/normalized/api/internal/Lock.kt
@@ -1,4 +1,4 @@
-package com.apollographql.apollo3.cache.normalized.internal
+package com.apollographql.apollo3.cache.normalized.api.internal
/**
* A lock with read/write semantics where possible.
@@ -6,7 +6,7 @@ package com.apollographql.apollo3.cache.normalized.internal
* - uses Java's `ReentrantReadWriteLock` on the JVM
* - uses AtomicFu's [ReentrantLock] on Native (read and write are not distinguished)
*/
-internal actual class Lock actual constructor() {
+actual class Lock actual constructor() {
  actual fun <T> read(block: () -> T): T {
return block()
}
diff --git a/libraries/apollo-normalized-cache-incubating/build.gradle.kts b/libraries/apollo-normalized-cache-incubating/build.gradle.kts
index 09b0c637989..cf677c5a9b6 100644
--- a/libraries/apollo-normalized-cache-incubating/build.gradle.kts
+++ b/libraries/apollo-normalized-cache-incubating/build.gradle.kts
@@ -14,9 +14,6 @@ kotlin {
api(project(":apollo-runtime"))
api(project(":apollo-normalized-cache-api-incubating"))
api(libs.kotlinx.coroutines)
- implementation(libs.atomicfu.get().toString()) {
- because("Use of ReentrantLock in DefaultApolloStore for Apple (we don't use the gradle plugin rewrite)")
- }
}
}
}
diff --git a/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt b/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt
index 1f20d854c92..eb945f21f6d 100644
--- a/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt
+++ b/libraries/apollo-normalized-cache-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/internal/DefaultApolloStore.kt
@@ -51,8 +51,6 @@ internal class DefaultApolloStore(
OptimisticCache().chain(normalizedCacheFactory.createChain()) as OptimisticCache
}
- private val lock = Lock()
-
  override fun publish(keys: Set<String>) {
if (keys.isEmpty()) {
return
@@ -62,9 +60,7 @@ internal class DefaultApolloStore(
}
override fun clearAll(): Boolean {
- lock.write {
- cache.clearAll()
- }
+ cache.clearAll()
return true
}
@@ -72,24 +68,20 @@ internal class DefaultApolloStore(
cacheKey: CacheKey,
cascade: Boolean,
): Boolean {
- return lock.write {
- cache.remove(cacheKey, cascade)
- }
+ return cache.remove(cacheKey, cascade)
}
override fun remove(
  cacheKeys: List<CacheKey>,
cascade: Boolean,
): Int {
- return lock.write {
- var count = 0
- for (cacheKey in cacheKeys) {
- if (cache.remove(cacheKey, cascade = cascade)) {
- count++
- }
+ var count = 0
+ for (cacheKey in cacheKeys) {
+ if (cache.remove(cacheKey, cascade = cascade)) {
+ count++
}
- count
}
+ return count
}
override fun normalize(
@@ -111,15 +103,13 @@ internal class DefaultApolloStore(
cacheHeaders: CacheHeaders,
): D {
val variables = operation.variables(customScalarAdapters, true)
- return lock.read {
- operation.readDataFromCachePrivate(
- cache = cache,
- cacheResolver = cacheResolver,
- cacheHeaders = cacheHeaders,
- cacheKey = CacheKey.rootKey(),
- variables = variables
- )
- }.toData(operation.adapter(), customScalarAdapters, variables)
+ return operation.readDataFromCachePrivate(
+ cache = cache,
+ cacheResolver = cacheResolver,
+ cacheHeaders = cacheHeaders,
+ cacheKey = CacheKey.rootKey(),
+ variables = variables
+ ).toData(operation.adapter(), customScalarAdapters, variables)
}
override fun readFragment(
@@ -130,22 +120,17 @@ internal class DefaultApolloStore(
): D {
val variables = fragment.variables(customScalarAdapters, true)
- return lock.read {
- fragment.readDataFromCachePrivate(
- cache = cache,
- cacheResolver = cacheResolver,
- cacheHeaders = cacheHeaders,
- cacheKey = cacheKey,
- variables = variables,
- )
- }.toData(fragment.adapter(), customScalarAdapters, variables)
+ return fragment.readDataFromCachePrivate(
+ cache = cache,
+ cacheResolver = cacheResolver,
+ cacheHeaders = cacheHeaders,
+ cacheKey = cacheKey,
+ variables = variables,
+ ).toData(fragment.adapter(), customScalarAdapters, variables)
}
  override fun <R> accessCache(block: (NormalizedCache) -> R): R {
- /**
- * We don't know how the cache is going to be used, assume write access
- */
- return lock.write { block(cache) }
+ return block(cache)
}
override fun writeOperation(
@@ -180,10 +165,7 @@ internal class DefaultApolloStore(
rootKey = cacheKey.key
).values
- val changedKeys = lock.write {
- cache.merge(records, cacheHeaders, recordMerger)
- }
-
+ val changedKeys = cache.merge(records, cacheHeaders, recordMerger)
if (publish) {
publish(changedKeys)
}
@@ -205,10 +187,7 @@ internal class DefaultApolloStore(
metadataGenerator = metadataGenerator,
).values.toSet()
- val changedKeys = lock.write {
- cache.merge(records, cacheHeaders, recordMerger)
- }
-
+ val changedKeys = cache.merge(records, cacheHeaders, recordMerger)
if (publish) {
publish(changedKeys)
}
@@ -237,13 +216,10 @@ internal class DefaultApolloStore(
)
}
- val changedKeys = lock.write {
- /**
- * TODO: should we forward the cache headers to the optimistic store?
- */
- cache.addOptimisticUpdates(records)
- }
-
+ /**
+ * TODO: should we forward the cache headers to the optimistic store?
+ */
+ val changedKeys = cache.addOptimisticUpdates(records)
if (publish) {
publish(changedKeys)
}
@@ -255,10 +231,7 @@ internal class DefaultApolloStore(
mutationId: Uuid,
publish: Boolean,
  ): Set<String> {
- val changedKeys = lock.write {
- cache.removeOptimisticUpdates(mutationId)
- }
-
+ val changedKeys = cache.removeOptimisticUpdates(mutationId)
if (publish) {
publish(changedKeys)
}
@@ -267,15 +240,11 @@ internal class DefaultApolloStore(
}
  fun merge(record: Record, cacheHeaders: CacheHeaders): Set<String> {
- return lock.write {
- cache.merge(record, cacheHeaders, recordMerger)
- }
+ return cache.merge(record, cacheHeaders, recordMerger)
}
  override fun dump(): Map<KClass<*>, Map<String, Record>> {
- return lock.read {
- cache.dump()
- }
+ return cache.dump()
}
override fun dispose() {}
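With the store-level Lock removed, thread safety now lives in each chained NormalizedCache (MemoryCache, OptimisticCache and SqlNormalizedCache in this patch). One inferred consequence: accessCache() no longer holds a write lock for the whole block, so a caller that needs a multi-step sequence to be atomic must synchronize it itself. A hypothetical JVM-only sketch (not part of this patch):

    private val evictionLock = Any()

    fun evictAll(store: ApolloStore, keys: List<CacheKey>): Int = synchronized(evictionLock) {
      store.accessCache { cache ->
        keys.count { cache.remove(it, cascade = false) }
      }
    }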
diff --git a/libraries/apollo-normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/sql/SqlNormalizedCache.kt b/libraries/apollo-normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/sql/SqlNormalizedCache.kt
index 31c86fdf9c5..07d8448da80 100644
--- a/libraries/apollo-normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/sql/SqlNormalizedCache.kt
+++ b/libraries/apollo-normalized-cache-sqlite-incubating/src/commonMain/kotlin/com/apollographql/apollo3/cache/normalized/sql/SqlNormalizedCache.kt
@@ -9,6 +9,7 @@ import com.apollographql.apollo3.cache.normalized.api.DefaultRecordMerger
import com.apollographql.apollo3.cache.normalized.api.NormalizedCache
import com.apollographql.apollo3.cache.normalized.api.Record
import com.apollographql.apollo3.cache.normalized.api.RecordMerger
+import com.apollographql.apollo3.cache.normalized.api.internal.Lock
import com.apollographql.apollo3.cache.normalized.sql.internal.RecordDatabase
import com.apollographql.apollo3.exception.apolloExceptionHandler
import kotlin.reflect.KClass
@@ -17,66 +18,101 @@ class SqlNormalizedCache internal constructor(
private val recordDatabase: RecordDatabase,
) : NormalizedCache() {
- override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? {
- val record = try {
- recordDatabase.select(key)
- } catch (e: Exception) {
- // Unable to read the record from the database, it is possibly corrupted - treat this as a cache miss
- apolloExceptionHandler(Exception("Unable to read a record from the database", e))
- null
- }
- if (record != null) {
- if (cacheHeaders.hasHeader(EVICT_AFTER_READ)) {
- recordDatabase.delete(key)
+ // A lock is only needed if there is a nextCache
+ private val lock = nextCache?.let { Lock() }
+
+ private fun <T> lockWrite(block: () -> T): T {
+ return lock?.write { block() } ?: block()
+ }
+
+ private fun <T> lockRead(block: () -> T): T {
+ return lock?.read { block() } ?: block()
+ }
+
+ private fun <T> maybeTransaction(condition: Boolean, block: () -> T): T {
+ return if (condition) {
+ recordDatabase.transaction {
+ block()
}
- return record
+ } else {
+ block()
+ }
+ }
+
+ override fun loadRecord(key: String, cacheHeaders: CacheHeaders): Record? {
+ val evictAfterRead = cacheHeaders.hasHeader(EVICT_AFTER_READ)
+ return lockWrite {
+ maybeTransaction(evictAfterRead) {
+ try {
+ recordDatabase.select(key)
+ } catch (e: Exception) {
+ // Unable to read the record from the database, it is possibly corrupted - treat this as a cache miss
+ apolloExceptionHandler(Exception("Unable to read a record from the database", e))
+ null
+ }?.also {
+ if (evictAfterRead) {
+ recordDatabase.delete(key)
+ }
+ }
+ } ?: nextCache?.loadRecord(key, cacheHeaders)
}
- return nextCache?.loadRecord(key, cacheHeaders)
}
  override fun loadRecords(keys: Collection<String>, cacheHeaders: CacheHeaders): Collection<Record> {
- val records = try {
- internalGetRecords(keys)
- } catch (e: Exception) {
- // Unable to read the records from the database, it is possibly corrupted - treat this as a cache miss
- apolloExceptionHandler(Exception("Unable to read records from the database", e))
- emptyList()
- }
- if (cacheHeaders.hasHeader(EVICT_AFTER_READ)) {
- records.forEach { record ->
- recordDatabase.delete(record.key)
+ val evictAfterRead = cacheHeaders.hasHeader(EVICT_AFTER_READ)
+ return lockWrite {
+ val records = maybeTransaction(evictAfterRead) {
+ try {
+ internalGetRecords(keys)
+ } catch (e: Exception) {
+ // Unable to read the records from the database, it is possibly corrupted - treat this as a cache miss
+ apolloExceptionHandler(Exception("Unable to read records from the database", e))
+ emptyList()
+ }.also {
+ if (evictAfterRead) {
+ it.forEach { record ->
+ recordDatabase.delete(record.key)
+ }
+ }
+ }
}
+ val missRecordKeys = keys - records.map { it.key }.toSet()
+ val missRecords = missRecordKeys.ifEmpty { null }?.let { nextCache?.loadRecords(it, cacheHeaders) }.orEmpty()
+ records + missRecords
}
- val missRecordKeys = keys - records.map { it.key }.toSet()
- val missRecords = missRecordKeys.ifEmpty { null }?.let { nextCache?.loadRecords(it, cacheHeaders) }.orEmpty()
- return records + missRecords
}
override fun clearAll() {
- nextCache?.clearAll()
- recordDatabase.deleteAll()
+ lockWrite {
+ nextCache?.clearAll()
+ recordDatabase.deleteAll()
+ }
}
override fun remove(cacheKey: CacheKey, cascade: Boolean): Boolean {
- val selfRemoved = recordDatabase.transaction {
- internalDeleteRecord(
- key = cacheKey.key,
- cascade = cascade,
- )
+ return lockWrite {
+ val selfRemoved = recordDatabase.transaction {
+ internalDeleteRecord(
+ key = cacheKey.key,
+ cascade = cascade,
+ )
+ }
+ val chainRemoved = nextCache?.remove(cacheKey, cascade) ?: false
+ selfRemoved || chainRemoved
}
- val chainRemoved = nextCache?.remove(cacheKey, cascade) ?: false
- return selfRemoved || chainRemoved
}
override fun remove(pattern: String): Int {
- var selfRemoved = 0
- recordDatabase.transaction {
- recordDatabase.deleteMatching(pattern)
- selfRemoved = recordDatabase.changes().toInt()
- }
- val chainRemoved = nextCache?.remove(pattern) ?: 0
+ return lockWrite {
+ var selfRemoved = 0
+ recordDatabase.transaction {
+ recordDatabase.deleteMatching(pattern)
+ selfRemoved = recordDatabase.changes().toInt()
+ }
+ val chainRemoved = nextCache?.remove(pattern) ?: 0
- return selfRemoved + chainRemoved
+ selfRemoved + chainRemoved
+ }
}
private fun CacheHeaders.date(): Long? {
@@ -96,12 +132,14 @@ class SqlNormalizedCache internal constructor(
if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
return emptySet()
}
- return try {
- internalUpdateRecord(record = record, recordMerger = recordMerger, date = cacheHeaders.date()) + nextCache?.merge(record, cacheHeaders).orEmpty()
- } catch (e: Exception) {
- // Unable to merge the record in the database, it is possibly corrupted - treat this as a cache miss
- apolloExceptionHandler(Exception("Unable to merge a record from the database", e))
- emptySet()
+ return lockWrite {
+ try {
+ internalUpdateRecord(record = record, recordMerger = recordMerger, date = cacheHeaders.date())
+ } catch (e: Exception) {
+ // Unable to merge the record in the database, it is possibly corrupted - treat this as a cache miss
+ apolloExceptionHandler(Exception("Unable to merge a record from the database", e))
+ emptySet()
+ } + nextCache?.merge(record, cacheHeaders).orEmpty()
}
}
@@ -110,19 +148,21 @@ class SqlNormalizedCache internal constructor(
if (cacheHeaders.hasHeader(ApolloCacheHeaders.DO_NOT_STORE)) {
return emptySet()
}
- return try {
- internalUpdateRecords(records = records, recordMerger = recordMerger, date = cacheHeaders.date()) + nextCache?.merge(records, cacheHeaders).orEmpty()
- } catch (e: Exception) {
- // Unable to merge the records in the database, it is possibly corrupted - treat this as a cache miss
- apolloExceptionHandler(Exception("Unable to merge records from the database", e))
- emptySet()
+ return lockWrite {
+ try {
+ internalUpdateRecords(records = records, recordMerger = recordMerger, date = cacheHeaders.date())
+ } catch (e: Exception) {
+ // Unable to merge the records in the database, it is possibly corrupted - treat this as a cache miss
+ apolloExceptionHandler(Exception("Unable to merge records from the database", e))
+ emptySet()
+ } + nextCache?.merge(records, cacheHeaders).orEmpty()
}
}
  override fun dump(): Map<KClass<*>, Map<String, Record>> {
- return mapOf(
- this@SqlNormalizedCache::class to recordDatabase.selectAll().associateBy { it.key }
- ) + nextCache?.dump().orEmpty()
+ return lockRead {
+ mapOf(this::class to recordDatabase.selectAll().associateBy { it.key }) + nextCache?.dump().orEmpty()
+ }
}
/**