diff --git a/Sources/Apollo/ApolloStore.swift b/Sources/Apollo/ApolloStore.swift
index 93f923590c..f36e954009 100644
--- a/Sources/Apollo/ApolloStore.swift
+++ b/Sources/Apollo/ApolloStore.swift
@@ -76,7 +76,7 @@ public final class ApolloStore {
   public func publish(records: RecordSet, identifier: UUID? = nil, callbackQueue: DispatchQueue = .main, completion: ((Result<Void, Swift.Error>) -> Void)? = nil) {
     queue.async(flags: .barrier) {
       do {
-        let changedKeys = try self.cache.merge(records: records)
+        let changedKeys = try self.cache.merge(records: records, identifier: identifier)
         self.didChangeKeys(changedKeys, identifier: identifier)
         DispatchQueue.apollo.returnResultAsyncIfNeeded(on: callbackQueue,
                                                        action: completion,
@@ -154,7 +154,7 @@ public final class ApolloStore {
   /// - Parameters:
   ///   - query: The query to load results for
   ///   - resultHandler: The completion handler to execute on success or error
-  public func load<Operation: GraphQLOperation>(query: Operation, callbackQueue: DispatchQueue? = nil, resultHandler: @escaping GraphQLResultHandler<Operation.Data>) {
+  public func load<Operation: GraphQLOperation>(query: Operation, identifier: UUID? = nil, callbackQueue: DispatchQueue? = nil, resultHandler: @escaping GraphQLResultHandler<Operation.Data>) {
     withinReadTransaction({ transaction in
       let mapper = GraphQLSelectionSetMapper<Operation.Data>()
       let dependencyTracker = GraphQLDependencyTracker()
@@ -162,6 +162,7 @@ public final class ApolloStore {
       let (data, dependentKeys) = try transaction.execute(selections: Operation.Data.selections,
                                                           onObjectWithKey: rootCacheKey(for: query),
                                                           variables: query.variables,
+                                                          identifier: identifier,
                                                           accumulator: zip(mapper, dependencyTracker))

       return GraphQLResult(data: data,
@@ -199,13 +200,17 @@ public final class ApolloStore {
                               accumulator: mapper)
     }

-    fileprivate func execute<Accumulator: GraphQLResultAccumulator>(selections: [GraphQLSelection], onObjectWithKey key: CacheKey, variables: GraphQLMap?, accumulator: Accumulator) throws -> Accumulator.FinalResult {
-      let object = try loadObject(forKey: key).get()
-
+    fileprivate func execute<Accumulator: GraphQLResultAccumulator>(selections: [GraphQLSelection],
+                                                                    onObjectWithKey key: CacheKey,
+                                                                    variables: GraphQLMap?,
+                                                                    identifier: UUID? = nil,
+                                                                    accumulator: Accumulator) throws -> Accumulator.FinalResult {
+      let object = try loadObject(forKey: key, identifier: identifier).get()
+
       let executor = GraphQLExecutor { object, info in
         return object[info.cacheKeyForField]
       } resolveReference: { reference in
-        self.loadObject(forKey: reference.key)
+        self.loadObject(forKey: reference.key, identifier: identifier)
       }

       executor.cacheKeyForObject = self.cacheKeyForObject
@@ -217,8 +222,8 @@ public final class ApolloStore {
                                accumulator: accumulator)
     }

-    private final func loadObject(forKey key: CacheKey) -> PossiblyDeferred<JSONObject> {
-      self.loader[key].map { record in
+    private final func loadObject(forKey key: CacheKey, identifier: UUID? = nil) -> PossiblyDeferred<JSONObject> {
+      self.loader[key, identifier].map { record in
         guard let record = record else { throw JSONDecodingError.missingValue }
         return record.fields
       }
@@ -308,8 +313,8 @@ public final class ApolloStore {
                                         withKey: key,
                                         variables: variables,
                                         accumulator: normalizer)
-      let changedKeys = try self.cache.merge(records: records)
-
+      let changedKeys = try self.cache.merge(records: records, identifier: nil)
+
       // Remove cached records, so subsequent reads
       // within the same transaction will reload the updated value.
       loader.removeAll()
diff --git a/Sources/Apollo/DataLoader.swift b/Sources/Apollo/DataLoader.swift
index 7992d06ba8..bfd44ec376 100644
--- a/Sources/Apollo/DataLoader.swift
+++ b/Sources/Apollo/DataLoader.swift
@@ -1,7 +1,7 @@
 import Foundation

 final class DataLoader<Key: Hashable, Value> {
-  public typealias BatchLoad = (Set<Key>) throws -> [Key: Value]
+  public typealias BatchLoad = (Set<Key>, UUID?) throws -> [Key: Value]

   private var batchLoad: BatchLoad
   private var cache: [Key: Result<Value?, Error>] = [:]
@@ -11,25 +11,25 @@ final class DataLoader<Key: Hashable, Value> {
     self.batchLoad = batchLoad
   }

-  subscript(key: Key) -> PossiblyDeferred<Value?> {
+  subscript(key: Key, identifier: UUID? = nil) -> PossiblyDeferred<Value?> {
     if let cachedResult = cache[key] {
       return .immediate(cachedResult)
     }

     pendingLoads.insert(key)

-    return .deferred { try self.load(key) }
+    return .deferred { try self.load(key, identifier) }
   }

-  private func load(_ key: Key) throws -> Value? {
+  private func load(_ key: Key, _ identifier: UUID?) throws -> Value? {
     if let cachedResult = cache[key] {
       return try cachedResult.get()
     }

     assert(pendingLoads.contains(key))

-    let values = try batchLoad(pendingLoads)
-
+    let values = try batchLoad(pendingLoads, identifier)
+
     for key in pendingLoads {
       cache[key] = .success(values[key])
     }
diff --git a/Sources/Apollo/InMemoryNormalizedCache.swift b/Sources/Apollo/InMemoryNormalizedCache.swift
index be9d828490..eb98f173ac 100644
--- a/Sources/Apollo/InMemoryNormalizedCache.swift
+++ b/Sources/Apollo/InMemoryNormalizedCache.swift
@@ -1,5 +1,6 @@
 import Foundation

+// TODO: Add a maximum size and an auto-eviction policy.
 public final class InMemoryNormalizedCache: NormalizedCache {
   private var records: RecordSet

@@ -7,7 +8,7 @@ public final class InMemoryNormalizedCache: NormalizedCache {
     self.records = records
   }

-  public func loadRecords(forKeys keys: Set<CacheKey>) throws -> [CacheKey: Record] {
+  public func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID? = nil) throws -> [CacheKey: Record] {
     return keys.reduce(into: [:]) { result, key in
       result[key] = records[key]
     }
@@ -17,7 +18,7 @@ public final class InMemoryNormalizedCache: NormalizedCache {
     records.removeRecord(for: key)
   }

-  public func merge(records newRecords: RecordSet) throws -> Set<CacheKey> {
+  public func merge(records newRecords: RecordSet, identifier: UUID? = nil) throws -> Set<CacheKey> {
     return records.merge(records: newRecords)
   }
diff --git a/Sources/Apollo/NormalizedCache.swift b/Sources/Apollo/NormalizedCache.swift
index e81d4ef310..fd9c9113b4 100644
--- a/Sources/Apollo/NormalizedCache.swift
+++ b/Sources/Apollo/NormalizedCache.swift
@@ -7,14 +7,14 @@ public protocol NormalizedCache {
   /// - Parameters:
   ///   - key: The cache keys to load data for
   /// - Returns: A dictionary of cache keys to records containing the records that have been found.
-  func loadRecords(forKeys keys: Set<CacheKey>) throws -> [CacheKey: Record]
-
+  func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID?) throws -> [CacheKey: Record]
+
   /// Merges a set of records into the cache.
   ///
   /// - Parameters:
   ///   - records: The set of records to merge.
   /// - Returns: A set of keys corresponding to *fields* that have changed (i.e. QUERY_ROOT.Foo.myField). These are the same type of keys as are returned by RecordSet.merge(records:).
-  func merge(records: RecordSet) throws -> Set<CacheKey>
+  func merge(records: RecordSet, identifier: UUID?) throws -> Set<CacheKey>

   /// Removes a record for the specified key. This method will only
   /// remove whole records, not individual fields.
diff --git a/Sources/ApolloSQLite/SQLiteNormalizedCache.swift b/Sources/ApolloSQLite/SQLiteNormalizedCache.swift
index b8c276c896..c2ff5c188e 100644
--- a/Sources/ApolloSQLite/SQLiteNormalizedCache.swift
+++ b/Sources/ApolloSQLite/SQLiteNormalizedCache.swift
@@ -93,7 +93,7 @@ public final class SQLiteNormalizedCache {
 // MARK: - NormalizedCache conformance

 extension SQLiteNormalizedCache: NormalizedCache {
-  public func loadRecords(forKeys keys: Set<CacheKey>) throws -> [CacheKey: Record] {
+  public func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID? = nil) throws -> [CacheKey: Record] {
     return [CacheKey: Record](uniqueKeysWithValues: try selectRecords(for: keys)
       .map { record in
         (record.key, record)
       })
   }
@@ -101,7 +101,7 @@ extension SQLiteNormalizedCache: NormalizedCache {
-  public func merge(records: RecordSet) throws -> Set<CacheKey> {
+  public func merge(records: RecordSet, identifier: UUID? = nil) throws -> Set<CacheKey> {
     return try mergeRecords(records: records)
   }