Split Caching review and thoughts #3377

Closed
Sources/Apollo/ApolloStore.swift (25 changes: 15 additions & 10 deletions)

@@ -76,7 +76,7 @@ public final class ApolloStore {
public func publish(records: RecordSet, identifier: UUID? = nil, callbackQueue: DispatchQueue = .main, completion: ((Result<Void, Error>) -> Void)? = nil) {
queue.async(flags: .barrier) {
do {
- let changedKeys = try self.cache.merge(records: records)
+ let changedKeys = try self.cache.merge(records: records, identifier: identifier)
self.didChangeKeys(changedKeys, identifier: identifier)
DispatchQueue.apollo.returnResultAsyncIfNeeded(on: callbackQueue,
action: completion,
@@ -154,14 +154,15 @@ public final class ApolloStore {
/// - Parameters:
/// - query: The query to load results for
/// - resultHandler: The completion handler to execute on success or error
- public func load<Operation: GraphQLOperation>(query: Operation, callbackQueue: DispatchQueue? = nil, resultHandler: @escaping GraphQLResultHandler<Operation.Data>) {
+ public func load<Operation: GraphQLOperation>(query: Operation, identifer: UUID? = nil, callbackQueue: DispatchQueue? = nil, resultHandler: @escaping GraphQLResultHandler<Operation.Data>) {
withinReadTransaction({ transaction in
let mapper = GraphQLSelectionSetMapper<Operation.Data>()
let dependencyTracker = GraphQLDependencyTracker()

let (data, dependentKeys) = try transaction.execute(selections: Operation.Data.selections,
onObjectWithKey: rootCacheKey(for: query),
variables: query.variables,
+ identifer: identifer,
accumulator: zip(mapper, dependencyTracker))

return GraphQLResult(data: data,
@@ -199,13 +200,17 @@ public final class ApolloStore {
accumulator: mapper)
}

- fileprivate func execute<Accumulator: GraphQLResultAccumulator>(selections: [GraphQLSelection], onObjectWithKey key: CacheKey, variables: GraphQLMap?, accumulator: Accumulator) throws -> Accumulator.FinalResult {
- let object = try loadObject(forKey: key).get()
+ fileprivate func execute<Accumulator: GraphQLResultAccumulator>(selections: [GraphQLSelection],
+ onObjectWithKey key: CacheKey,
+ variables: GraphQLMap?,
+ identifer: UUID? = nil,
+ accumulator: Accumulator) throws -> Accumulator.FinalResult {
+ let object = try loadObject(forKey: key, identifer: identifer).get()

let executor = GraphQLExecutor { object, info in
return object[info.cacheKeyForField]
} resolveReference: { reference in
- self.loadObject(forKey: reference.key)
+ self.loadObject(forKey: reference.key, identifer: identifer)
}

executor.cacheKeyForObject = self.cacheKeyForObject
@@ -217,8 +222,8 @@
accumulator: accumulator)
}

- private final func loadObject(forKey key: CacheKey) -> PossiblyDeferred<JSONObject> {
- self.loader[key].map { record in
+ private final func loadObject(forKey key: CacheKey, identifer: UUID? = nil) -> PossiblyDeferred<JSONObject> {
+ self.loader[key, identifer].map { record in
guard let record = record else { throw JSONDecodingError.missingValue }
return record.fields
}
@@ -308,8 +313,8 @@
withKey: key,
variables: variables,
accumulator: normalizer)
- let changedKeys = try self.cache.merge(records: records)
+ let changedKeys = try self.cache.merge(records: records, identifier: nil)

// Remove cached records, so subsequent reads
// within the same transaction will reload the updated value.
loader.removeAll()
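
Taken together, the ApolloStore changes thread a per-request UUID from the public entry points down to the cache. Below is a hedged usage sketch, assuming the API shape in this diff (including its `identifer` spelling of the load parameter); `HeroNameQuery` stands in for any generated operation type, and the record literal follows the pattern Apollo's store tests use.

```swift
import Foundation
import Apollo

// Illustrative only: publish a record set under a request identifier, then load a
// query back with the same identifier so it reaches merge/loadRecords on the cache.
let store = ApolloStore(cache: InMemoryNormalizedCache())
let requestID = UUID()

let records: RecordSet = [
  "QUERY_ROOT": ["hero": Reference(key: "hero")],
  "hero": ["__typename": "Droid", "name": "R2-D2"]
]

store.publish(records: records, identifier: requestID) { result in
  switch result {
  case .success:
    // HeroNameQuery is a placeholder for a generated operation type.
    store.load(query: HeroNameQuery(), identifer: requestID) { result in
      print(result)
    }
  case .failure(let error):
    print("Publish failed: \(error)")
  }
}
```
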
Sources/Apollo/DataLoader.swift (12 changes: 6 additions & 6 deletions)

@@ -1,7 +1,7 @@
import Foundation

final class DataLoader<Key: Hashable, Value> {
- public typealias BatchLoad = (Set<Key>) throws -> [Key: Value]
+ public typealias BatchLoad = (Set<Key>, UUID?) throws -> [Key: Value]
private var batchLoad: BatchLoad

private var cache: [Key: Result<Value?, Error>] = [:]
@@ -11,25 +11,25 @@ final class DataLoader<Key: Hashable, Value> {
self.batchLoad = batchLoad
}

- subscript(key: Key) -> PossiblyDeferred<Value?> {
+ subscript(key: Key, identifier: UUID? = nil) -> PossiblyDeferred<Value?> {
if let cachedResult = cache[key] {
return .immediate(cachedResult)
}

pendingLoads.insert(key)

- return .deferred { try self.load(key) }
+ return .deferred { try self.load(key, identifier) }
}

- private func load(_ key: Key) throws -> Value? {
+ private func load(_ key: Key, _ identifier: UUID?) throws -> Value? {
if let cachedResult = cache[key] {
return try cachedResult.get()
}

assert(pendingLoads.contains(key))

- let values = try batchLoad(pendingLoads)
+ let values = try batchLoad(pendingLoads, identifier)
Contributor: There is a problem here. You could theoretically use the subscript to add pendingLoads with one identifier, and then call load with a different identifier, which would then run the batch load with the second identifier, even for the pendingLoads that should use the other identifier.

Author: Ah, good catch. I was able to fix this and get all the tests to pass by separating identifier loads from non-identifier loads, hashing the identifier together with the cache key inside the loader (see the sketch after this file's diff). That said, it made me start to question whether all of this is really needed when reading the cache, as you pointed out.


for key in pendingLoads {
cache[key] = .success(values[key])
}
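
A minimal sketch of the composite-key fix the author describes above, assuming a simplified loader rather than Apollo's actual DataLoader (no PossiblyDeferred, no subscript); the IdentifiedDataLoader and LoadKey names are illustrative only.

```swift
import Foundation

// Illustrative sketch: the cache and the pending set are keyed by (key, identifier),
// so loads made under different identifiers can never end up in the same batch.
final class IdentifiedDataLoader<Key: Hashable, Value> {
  typealias BatchLoad = (Set<Key>, UUID?) throws -> [Key: Value]

  private struct LoadKey: Hashable {
    let key: Key
    let identifier: UUID?
  }

  private let batchLoad: BatchLoad
  private var cache: [LoadKey: Result<Value?, Error>] = [:]
  private var pendingLoads: Set<LoadKey> = []

  init(_ batchLoad: @escaping BatchLoad) {
    self.batchLoad = batchLoad
  }

  func load(_ key: Key, identifier: UUID? = nil) throws -> Value? {
    let loadKey = LoadKey(key: key, identifier: identifier)
    if let cached = cache[loadKey] {
      return try cached.get()
    }
    pendingLoads.insert(loadKey)

    // Only flush the pending loads that share this identifier; others keep waiting.
    let batch = pendingLoads.filter { $0.identifier == identifier }
    let values = try batchLoad(Set(batch.map(\.key)), identifier)
    for pending in batch {
      cache[pending] = .success(values[pending.key])
      pendingLoads.remove(pending)
    }
    return values[key]
  }
}
```
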
Sources/Apollo/InMemoryNormalizedCache.swift (5 changes: 3 additions & 2 deletions)

@@ -1,13 +1,14 @@
import Foundation

+ // Add max size and auto eviction policy
public final class InMemoryNormalizedCache: NormalizedCache {
private var records: RecordSet

public init(records: RecordSet = RecordSet()) {
self.records = records
}

- public func loadRecords(forKeys keys: Set<CacheKey>) throws -> [CacheKey: Record] {
+ public func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID? = nil) throws -> [CacheKey: Record] {
return keys.reduce(into: [:]) { result, key in
result[key] = records[key]
}
@@ -17,7 +18,7 @@ public final class InMemoryNormalizedCache: NormalizedCache {
records.removeRecord(for: key)
}

- public func merge(records newRecords: RecordSet) throws -> Set<CacheKey> {
+ public func merge(records newRecords: RecordSet, identifier: UUID? = nil) throws -> Set<CacheKey> {
return records.merge(records: newRecords)
}

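
The comment added at the top of this file flags a max size and auto-eviction policy as future work. A rough sketch of one way that could look, purely illustrative and not part of the PR, using a simple count cap with oldest-first (FIFO) eviction:

```swift
import Foundation

// Illustrative sketch: a record store capped at maxRecords, evicting the oldest
// inserted keys first once the cap is exceeded. Not part of the PR.
final class BoundedRecordStore {
  private var records: [String: [String: Any]] = [:]  // cache key -> record fields
  private var insertionOrder: [String] = []           // oldest keys first
  private let maxRecords: Int

  init(maxRecords: Int = 10_000) {
    self.maxRecords = maxRecords
  }

  func record(for key: String) -> [String: Any]? {
    records[key]
  }

  func store(_ fields: [String: Any], for key: String) {
    if records[key] == nil {
      insertionOrder.append(key)
    }
    records[key] = fields
    // Evict the oldest records once the cap is exceeded.
    while records.count > maxRecords, let oldest = insertionOrder.first {
      insertionOrder.removeFirst()
      records.removeValue(forKey: oldest)
    }
  }
}
```
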
Sources/Apollo/NormalizedCache.swift (6 changes: 3 additions & 3 deletions)

@@ -7,14 +7,14 @@ public protocol NormalizedCache {
/// - Parameters:
/// - key: The cache keys to load data for
/// - Returns: A dictionary of cache keys to records containing the records that have been found.
- func loadRecords(forKeys keys: Set<CacheKey>) throws -> [CacheKey: Record]
+ func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID?) throws -> [CacheKey: Record]
Contributor: I'm wondering why you need the identifier added to the loadRecords method. Neither of the implementations in this PR seems to actually use that parameter.

Author: The goal was to use it in a custom implementation, but as you pointed out it's mostly not needed here, so I'll remove it. (A sketch of what such a conformance could look like follows this file's diff.)


/// Merges a set of records into the cache.
///
/// - Parameters:
/// - records: The set of records to merge.
/// - Returns: A set of keys corresponding to *fields* that have changed (i.e. QUERY_ROOT.Foo.myField). These are the same type of keys as are returned by RecordSet.merge(records:).
- func merge(records: RecordSet) throws -> Set<CacheKey>
+ func merge(records: RecordSet, identifier: UUID?) throws -> Set<CacheKey>

/// Removes a record for the specified key. This method will only
/// remove whole records, not individual fields.
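
Since the review above asks what would actually consume the identifier, here is a hedged sketch of one possible custom implementation: a per-request overlay on top of a shared base RecordSet. It only implements the two identifier-aware methods; a real NormalizedCache conformance would also need the protocol's remaining requirements (record removal, clearing, and so on).

```swift
import Foundation
import Apollo

// Illustrative sketch: reads prefer a request-scoped overlay keyed by identifier,
// then fall back to the shared base; writes with an identifier stay in that overlay.
final class SplitRecordCache {
  private var base = RecordSet()
  private var overlays: [UUID: RecordSet] = [:]

  func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID?) throws -> [CacheKey: Record] {
    let overlay = identifier.flatMap { overlays[$0] }
    return keys.reduce(into: [:]) { result, key in
      result[key] = overlay?[key] ?? base[key]
    }
  }

  func merge(records newRecords: RecordSet, identifier: UUID?) throws -> Set<CacheKey> {
    guard let identifier = identifier else {
      return base.merge(records: newRecords)
    }
    var overlay = overlays[identifier] ?? RecordSet()
    let changedKeys = overlay.merge(records: newRecords)
    overlays[identifier] = overlay
    return changedKeys
  }
}
```
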
Sources/ApolloSQLite/SQLiteNormalizedCache.swift (4 changes: 2 additions & 2 deletions)

@@ -93,15 +93,15 @@ public final class SQLiteNormalizedCache {
// MARK: - NormalizedCache conformance

extension SQLiteNormalizedCache: NormalizedCache {
- public func loadRecords(forKeys keys: Set<CacheKey>) throws -> [CacheKey: Record] {
+ public func loadRecords(forKeys keys: Set<CacheKey>, identifier: UUID? = nil) throws -> [CacheKey: Record] {
return [CacheKey: Record](uniqueKeysWithValues:
try selectRecords(for: keys)
.map { record in
(record.key, record)
})
}

- public func merge(records: RecordSet) throws -> Set<CacheKey> {
+ public func merge(records: RecordSet, identifier: UUID? = nil) throws -> Set<CacheKey> {
return try mergeRecords(records: records)
}

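
One Swift detail worth noting about both conformances: the `identifier: UUID? = nil` defaults they add do not flow through the protocol, because protocol requirements cannot declare default arguments. Call sites that go through NormalizedCache itself still have to pass the identifier explicitly, as the short hedged snippet below shows.

```swift
import Foundation
import Apollo

// The default argument is only visible when the concrete type is known.
let concrete = InMemoryNormalizedCache()
let abstract: NormalizedCache = concrete

let keys: Set<CacheKey> = ["QUERY_ROOT"]
_ = try? concrete.loadRecords(forKeys: keys)                  // default `identifier` applies
_ = try? abstract.loadRecords(forKeys: keys, identifier: nil) // must be passed explicitly
```
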