From 69554283d8fb903eae7bc4d6e2eb2a7b4d748614 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 30 Jan 2025 12:49:02 +0100 Subject: [PATCH 01/38] Modify the matrix-sdk and add feature flag for the event cache store --- crates/matrix-sdk-indexeddb/Cargo.toml | 3 ++- crates/matrix-sdk-indexeddb/src/lib.rs | 18 ++++++++++++++++++ crates/matrix-sdk/Cargo.toml | 2 +- crates/matrix-sdk/src/client/builder/mod.rs | 10 ++++++++-- 4 files changed, 29 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/Cargo.toml b/crates/matrix-sdk-indexeddb/Cargo.toml index e06e315b060..80426a08246 100644 --- a/crates/matrix-sdk-indexeddb/Cargo.toml +++ b/crates/matrix-sdk-indexeddb/Cargo.toml @@ -14,8 +14,9 @@ default-target = "wasm32-unknown-unknown" rustdoc-args = ["--cfg", "docsrs"] [features] -default = ["e2e-encryption", "state-store"] +default = ["e2e-encryption", "state-store", "event-cache-store"] state-store = ["dep:matrix-sdk-base", "growable-bloom-filter"] +event-cache-store = ["dep:matrix-sdk-base"] e2e-encryption = ["dep:matrix-sdk-crypto"] testing = ["matrix-sdk-crypto?/testing"] diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index a828ac6755c..540a55c626b 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -58,6 +58,24 @@ pub async fn open_state_store( Ok(state_store) } +/// Create an ['IndexeddbEventCacheStore'] +/// +/// If a `passphrase` is given, the store will be encrypted using a key derived +/// from that passphrase. 
+#[cfg(feature = "event-cache-store")] +pub async fn open_event_cache_store( + name: &str, + passphrase: Option<&str>, +) -> Result { + let mut builder = IndexeddbEventCacheStore::builder().name(name.to_owned()); + if let Some(passphrase) = passphrase { + builder = builder.passphrase(passphrase.to_owned()); + } + let event_cache_store = builder.build().await.map_err(StoreError::from)?; + + Ok(event_cache_store) +} + /// All the errors that can occur when opening an IndexedDB store. #[derive(Error, Debug)] pub enum OpenStoreError { diff --git a/crates/matrix-sdk/Cargo.toml b/crates/matrix-sdk/Cargo.toml index 683f4582169..ba41aa62a2e 100644 --- a/crates/matrix-sdk/Cargo.toml +++ b/crates/matrix-sdk/Cargo.toml @@ -32,7 +32,7 @@ sqlite = [ "matrix-sdk-sqlite?/event-cache" ] bundled-sqlite = ["sqlite", "matrix-sdk-sqlite?/bundled"] -indexeddb = ["matrix-sdk-indexeddb/state-store"] +indexeddb = ["matrix-sdk-indexeddb/state-store", "matrix-sdk-indexeddb/event-cache-store"] qrcode = ["e2e-encryption", "matrix-sdk-base/qrcode"] automatic-room-key-forwarding = ["e2e-encryption", "matrix-sdk-base/automatic-room-key-forwarding"] diff --git a/crates/matrix-sdk/src/client/builder/mod.rs b/crates/matrix-sdk/src/client/builder/mod.rs index bf4ab18caac..5a98b5e1664 100644 --- a/crates/matrix-sdk/src/client/builder/mod.rs +++ b/crates/matrix-sdk/src/client/builder/mod.rs @@ -635,10 +635,16 @@ async fn build_indexeddb_store_config( }; let store_config = { - tracing::warn!("The IndexedDB backend does not implement an event cache store, falling back to the in-memory event cache store…"); - store_config.event_cache_store(matrix_sdk_base::event_cache::store::MemoryStore::new()) + let event_cache_store = + matrix_sdk_indexeddb::open_event_cache_store(name, passphrase).await?; + store_config.event_cache_store(event_cache_store) }; + // let store_config = { + // tracing::warn!("The IndexedDB backend does not implement an event cache store, falling back to the in-memory event cache 
store…"); + // store_config.event_cache_store(matrix_sdk_base::event_cache::store::MemoryStore::new()) + // }; + Ok(store_config) } From b8bfd3e328c9862305186229d26d80cd1373c77e Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 30 Jan 2025 13:05:59 +0100 Subject: [PATCH 02/38] Create the basic struct to initialize an event_cache_store --- .../src/event_cache_store/mod.rs | 56 +++++++++++++++++++ crates/matrix-sdk-indexeddb/src/lib.rs | 4 ++ 2 files changed, 60 insertions(+) create mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs new file mode 100644 index 00000000000..a56b8cf42f6 --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -0,0 +1,56 @@ +use matrix::sdk::base::event_cache::store::EventCacheStore; + +pub struct IndexeddbEventCacheStore { + name: String, + pub(crate) inner: IdbDatabase, + pub(crate) meta: IdbDatabase, + pub(crate) store_cipher: Option>, +} + +#[cfg(not(tarpaulin_include))] +impl std::fmt::Debug for IndexeddbEventCacheStore { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("IndexeddbEventCacheStore").finish() + } +} + +impl IndexeddbEventCacheStore { + pub fn builder() -> IndexeddbEventCacheStoreBuilder { + IndexeddbEventCacheStoreBuilder::new() + } +} + +#[async_trait] +impl EventCacheStore for IndexeddbEventCacheStore {} + +/// Builder for [`IndexeddbEventCacheStore`] +#[derive(Debug)] +pub struct IndexeddbEventCacheStoreBuilder { + name: Option, + passphrase: Option, +} + +impl IndexeddbEventCacheStoreBuilder { + fn new() -> Self { + Self { name: None, passphrase: None } + } + + pub fn name(mut self, name: String) -> Self { + self.name = Some(name); + self + } + + pub fn passphrase(mut self, passphrase: String) -> Self { + self.passphrase = Some(passphrase); + self + } + + pub async fn build(self) -> Result { + let name = 
self.name.unwrap_or_else(|| "event_cache".to_owned()); + + let (meta, store_cipher) = upgrade_meta_db(&meta_name, self.passphrase.as_deref()).await?; + let inner = upgrade_inner_db(&name, store_cipher.as_deref(), &meta).await?; + + Ok(IndexeddbEventCacheStore { name, inner, meta, store_cipher }) + } +} diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index 540a55c626b..73875e5596a 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -6,6 +6,8 @@ use thiserror::Error; #[cfg(feature = "e2e-encryption")] mod crypto_store; +#[cfg(feature = "event-cache-store")] +mod event_cache_store; mod safe_encode; #[cfg(feature = "e2e-encryption")] mod serialize_bool_for_indexeddb; @@ -14,6 +16,8 @@ mod state_store; #[cfg(feature = "e2e-encryption")] pub use crypto_store::{IndexeddbCryptoStore, IndexeddbCryptoStoreError}; +#[cfg(feature = "event-cache-store")] +pub use event_cache_store::{IndexeddbEventCacheStore, IndexeddbEventCacheStoreBuilder}; #[cfg(feature = "state-store")] pub use state_store::{ IndexeddbStateStore, IndexeddbStateStoreBuilder, IndexeddbStateStoreError, From c2761d50fbb0221a40c3911b93a35b6e675dcd37 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 30 Jan 2025 18:15:50 +0100 Subject: [PATCH 03/38] Almost working instantiation of event cache store --- .../event_cache_store/indexeddb_serializer.rs | 489 ++++++++++++++++++ .../src/event_cache_store/migrations.rs | 10 + .../src/event_cache_store/mod.rs | 98 +++- crates/matrix-sdk-indexeddb/src/lib.rs | 11 +- 4 files changed, 595 insertions(+), 13 deletions(-) create mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs create mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs new file mode 100644 index 
00000000000..6b3c70bc0cd --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs @@ -0,0 +1,489 @@ +// Copyright 2023 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::sync::Arc; + +use base64::{ + alphabet, + engine::{general_purpose, GeneralPurpose}, + Engine, +}; +use gloo_utils::format::JsValueSerdeExt; +use matrix_sdk_crypto::CryptoStoreError; +use matrix_sdk_store_encryption::{EncryptedValueBase64, StoreCipher}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use wasm_bindgen::JsValue; +use web_sys::IdbKeyRange; +use zeroize::Zeroizing; + +use crate::{safe_encode::SafeEncode, IndexeddbEventCacheStoreError}; + +type Result = std::result::Result; + +const BASE64: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, general_purpose::NO_PAD); + +/// Handles the functionality of serializing and encrypting data for the +/// indexeddb store. +pub struct IndexeddbSerializer { + store_cipher: Option>, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(untagged)] +pub enum MaybeEncrypted { + Encrypted(EncryptedValueBase64), + Unencrypted(String), +} + +impl IndexeddbSerializer { + pub fn new(store_cipher: Option>) -> Self { + Self { store_cipher } + } + + /// Hash the given key securely for the given tablename, using the store + /// cipher. + /// + /// First calls [`SafeEncode::as_encoded_string`] + /// on the `key` to encode it into a formatted string. 
+ /// + /// Then, if a cipher is configured, hashes the formatted key and returns + /// the hash encoded as unpadded base64. + /// + /// If no cipher is configured, just returns the formatted key. + /// + /// This is faster than [`Self::serialize_value`] and reliably gives the + /// same output for the same input, making it suitable for index keys. + pub fn encode_key(&self, table_name: &str, key: T) -> JsValue + where + T: SafeEncode, + { + self.encode_key_as_string(table_name, key).into() + } + + /// Hash the given key securely for the given tablename, using the store + /// cipher. + /// + /// The same as [`Self::encode_key`], but stops short of converting the + /// resulting base64 string into a JsValue + pub fn encode_key_as_string(&self, table_name: &str, key: T) -> String + where + T: SafeEncode, + { + match &self.store_cipher { + Some(cipher) => key.as_secure_string(table_name, cipher), + None => key.as_encoded_string(), + } + } + + pub fn encode_to_range( + &self, + table_name: &str, + key: T, + ) -> Result + where + T: SafeEncode, + { + match &self.store_cipher { + Some(cipher) => key.encode_to_range_secure(table_name, cipher), + None => key.encode_to_range(), + } + .map_err(|e| IndexeddbEventCacheStoreError::DomException { + code: 0, + name: "IdbKeyRangeMakeError".to_owned(), + message: e, + }) + } + + /// Encode the value for storage as a value in indexeddb. + /// + /// A thin wrapper around [`IndexeddbSerializer::maybe_encrypt_value`]: + /// encrypts the given object, and then turns the [`MaybeEncrypted`] + /// result into a JS object for storage in indexeddb. + pub fn serialize_value( + &self, + value: &impl Serialize, + ) -> Result { + let serialized = self.maybe_encrypt_value(value)?; + Ok(serde_wasm_bindgen::to_value(&serialized)?) + } + + /// Encode the value for storage as a value in indexeddb. + /// + /// Returns a byte vector which is either the JSON serialisation of the + /// value, or an encrypted version thereof. 
+ /// + /// Avoid using this in new code. Prefer + /// [`IndexeddbSerializer::serialize_value`] or + /// [`IndexeddbSerializer::maybe_encrypt_value`]. + pub fn serialize_value_as_bytes( + &self, + value: &impl Serialize, + ) -> Result, CryptoStoreError> { + match &self.store_cipher { + Some(cipher) => cipher.encrypt_value(value).map_err(CryptoStoreError::backend), + None => serde_json::to_vec(value).map_err(CryptoStoreError::backend), + } + } + + /// Encode an object for storage as a value in indexeddb. + /// + /// First serializes the object as JSON bytes. + /// + /// Then, if a cipher is set, encrypts the JSON with a nonce into binary + /// blobs, and base64-encodes the blobs. + /// + /// If no cipher is set, just base64-encodes the JSON bytes. + /// + /// Finally, returns an object encapsulating the result. + pub fn maybe_encrypt_value( + &self, + value: T, + ) -> Result { + // First serialize the object as JSON. + let serialized = serde_json::to_vec(&value).map_err(CryptoStoreError::backend)?; + + // Then either encrypt the JSON, or just base64-encode it. + Ok(match &self.store_cipher { + Some(cipher) => MaybeEncrypted::Encrypted( + cipher.encrypt_value_base64_data(serialized).map_err(CryptoStoreError::backend)?, + ), + None => MaybeEncrypted::Unencrypted(BASE64.encode(serialized)), + }) + } + + /// Decode a value that was previously encoded with + /// [`Self::serialize_value`]. + pub fn deserialize_value( + &self, + value: JsValue, + ) -> Result { + // Objects which are serialized nowadays should be represented as a + // `MaybeEncrypted`. However, `serialize_value` previously used a + // different format, so we need to handle that in case we have old data. + // + // If we can convert the JsValue into a `MaybeEncrypted`, then it's probably one + // of those. + // + // - `MaybeEncrypted::Encrypted` becomes a JS object with properties {`version`, + // `nonce`, `ciphertext`}. + // + // - `MaybeEncrypted::Unencrypted` becomes a JS string containing base64 text. 
+ // + // Otherwise, it probably uses our old serialization format: + // + // - Encrypted values were: serialized to an array of JSON bytes; encrypted to + // an array of u8 bytes; stored in a Rust object; serialized (again) into an + // array of JSON bytes. Net result is a JS array. + // + // - Unencrypted values were serialized to JSON, then deserialized into a + // javascript object/string/array/bool. + // + // Note that there are several potential ambiguities here: + // + // - A JS string could either be a legacy unencrypted value, or a + // `MaybeEncrypted::Unencrypted`. However, the only thing that actually got + // stored as a string under the legacy system was `backup_key_v1`, and that is + // special-cased not to use this path — so if we can convert it into a + // `MaybeEncrypted::Unencrypted`, then we assume it is one. + // + // - A JS array could be either a legacy encrypted value or a legacy unencrypted + // value. We can tell the difference by whether we have a `cipher`. + // + // - A JS object could be either a legacy unencrypted value or a + // `MaybeEncrypted::Encrypted`. We assume that no legacy JS objects have the + // properties to be successfully decoded into a `MaybeEncrypted::Encrypted`. + + // First check if it looks like a `MaybeEncrypted`, of either type. + if let Ok(maybe_encrypted) = serde_wasm_bindgen::from_value(value.clone()) { + return Ok(self.maybe_decrypt_value(maybe_encrypted)?); + } + + // Otherwise, fall back to the legacy deserializer. + self.deserialize_legacy_value(value) + } + + /// Decode a value that was encoded with an old version of + /// `serialize_value`. + /// + /// This should only be used on values from an old database which are known + /// to be serialized with the old format. 
+ pub fn deserialize_legacy_value( + &self, + value: JsValue, + ) -> Result { + match &self.store_cipher { + Some(cipher) => { + if !value.is_array() { + return Err(IndexeddbEventCacheStoreError::CryptoStoreError( + CryptoStoreError::UnpicklingError, + )); + } + + // Looks like legacy encrypted format. + // + // `value` is a JS-side array containing the byte values. Turn it into a + // rust-side Vec, then decrypt. + let value: Vec = serde_wasm_bindgen::from_value(value)?; + Ok(cipher.decrypt_value(&value).map_err(CryptoStoreError::backend)?) + } + + None => { + // Legacy unencrypted format could be just about anything; just try + // JSON-serializing the value, then deserializing it into the + // desired type. + // + // Note that the stored data was actually encoded by JSON-serializing it, and + // then deserializing the JSON into Javascript objects — so, for + // example, `HashMap`s are converted into Javascript Objects + // (whose keys are always strings) rather than Maps (whose keys + // can be other things). `serde_wasm_bindgen::from_value` will complain about + // such things. The correct thing to do is to go *back* to JSON + // and then deserialize into Rust again, which is what `JsValue::into_serde` + // does. + Ok(value.into_serde()?) + } + } + } + + /// Decode a value that was previously encoded with + /// [`Self::serialize_value_as_bytes`] + pub fn deserialize_value_from_bytes( + &self, + value: &[u8], + ) -> Result { + if let Some(cipher) = &self.store_cipher { + cipher.decrypt_value(value).map_err(CryptoStoreError::backend) + } else { + serde_json::from_slice(value).map_err(CryptoStoreError::backend) + } + } + + /// Decode a value that was previously encoded with + /// [`Self::maybe_encrypt_value`] + pub fn maybe_decrypt_value( + &self, + value: MaybeEncrypted, + ) -> Result { + // First extract the plaintext JSON, either by decrypting or un-base64-ing. 
+ let plaintext = Zeroizing::new(match (&self.store_cipher, value) { + (Some(cipher), MaybeEncrypted::Encrypted(enc)) => { + cipher.decrypt_value_base64_data(enc).map_err(CryptoStoreError::backend)? + } + (None, MaybeEncrypted::Unencrypted(unc)) => { + BASE64.decode(unc).map_err(CryptoStoreError::backend)? + } + + _ => return Err(CryptoStoreError::UnpicklingError), + }); + + // Then deserialize the JSON. + Ok(serde_json::from_slice(&plaintext)?) + } +} + +#[cfg(all(test, target_arch = "wasm32"))] +mod tests { + use std::{collections::BTreeMap, sync::Arc}; + + use gloo_utils::format::JsValueSerdeExt; + use matrix_sdk_store_encryption::StoreCipher; + use matrix_sdk_test::async_test; + use serde::{Deserialize, Serialize}; + use serde_json::json; + use wasm_bindgen::JsValue; + + use super::IndexeddbSerializer; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + /// Test that `serialize_value`/`deserialize_value` will round-trip, when a + /// cipher is in use. + #[async_test] + async fn test_serialize_deserialize_with_cipher() { + let serializer = IndexeddbSerializer::new(Some(Arc::new(StoreCipher::new().unwrap()))); + + let obj = make_test_object(); + let serialized = serializer.serialize_value(&obj).expect("could not serialize"); + let deserialized: TestStruct = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + /// Test that `serialize_value`/`deserialize_value` will round-trip, when no + /// cipher is in use. 
+ #[async_test] + async fn test_serialize_deserialize_no_cipher() { + let serializer = IndexeddbSerializer::new(None); + let obj = make_test_object(); + let serialized = serializer.serialize_value(&obj).expect("could not serialize"); + let deserialized: TestStruct = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + /// Test that `deserialize_value` can decode a value that was encoded with + /// an old implementation of `serialize_value`, when a cipher is in use. + #[async_test] + async fn test_deserialize_old_serialized_value_with_cipher() { + let cipher = test_cipher(); + let obj = make_test_object(); + + // Follow the old format for encoding: + // 1. Encode as JSON, in a Vec of bytes + // 2. Encrypt + // 3. JSON-encode to another Vec + // 4. Turn the Vec into a Javascript array of numbers. + let data = serde_json::to_vec(&obj).unwrap(); + let data = cipher.encrypt_value_data(data).unwrap(); + let data = serde_json::to_vec(&data).unwrap(); + let serialized = JsValue::from_serde(&data).unwrap(); + + // Now, try deserializing with `deserialize_value`, and check we get the right + // thing. + let serializer = IndexeddbSerializer::new(Some(Arc::new(cipher))); + let deserialized: TestStruct = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + /// Test that `deserialize_value` can decode a value that was encoded with + /// an old implementation of `serialize_value`, when no cipher is in use. + #[async_test] + async fn test_deserialize_old_serialized_value_no_cipher() { + // An example of an object which was serialized using the old-format + // `serialize_value`. 
+ let json = json!({ "id":0, "name": "test", "map": { "0": "test" }}); + let serialized = js_sys::JSON::parse(&json.to_string()).unwrap(); + + let serializer = IndexeddbSerializer::new(None); + let deserialized: TestStruct = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(make_test_object(), deserialized); + } + + /// Test that `deserialize_value` can decode an array value that was encoded + /// with an old implementation of `serialize_value`, when no cipher is + /// in use. + #[async_test] + async fn test_deserialize_old_serialized_array_no_cipher() { + let json = json!([1, 2, 3, 4]); + let serialized = js_sys::JSON::parse(&json.to_string()).unwrap(); + + let serializer = IndexeddbSerializer::new(None); + let deserialized: Vec = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(vec![1, 2, 3, 4], deserialized); + } + + /// Test that `deserialize_value` can decode a value encoded with + /// `maybe_encrypt_value`, when a cipher is in use. + #[async_test] + async fn test_maybe_encrypt_deserialize_with_cipher() { + let serializer = IndexeddbSerializer::new(Some(Arc::new(StoreCipher::new().unwrap()))); + + let obj = make_test_object(); + let serialized = serializer.maybe_encrypt_value(&obj).expect("could not serialize"); + let serialized = serde_wasm_bindgen::to_value(&serialized).unwrap(); + + let deserialized: TestStruct = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + /// Test that `deserialize_value` can decode a value encoded with + /// `maybe_encrypt_value`, when no cipher is in use. 
+ #[async_test] + async fn test_maybe_encrypt_deserialize_no_cipher() { + let serializer = IndexeddbSerializer::new(None); + let obj = make_test_object(); + let serialized = serializer.maybe_encrypt_value(&obj).expect("could not serialize"); + let serialized = serde_wasm_bindgen::to_value(&serialized).unwrap(); + let deserialized: TestStruct = + serializer.deserialize_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + /// Test that `maybe_encrypt_value`/`maybe_decrypt_value` will round-trip, + /// when a cipher is in use. + #[async_test] + async fn test_maybe_encrypt_decrypt_with_cipher() { + let serializer = IndexeddbSerializer::new(Some(Arc::new(StoreCipher::new().unwrap()))); + + let obj = make_test_object(); + let serialized = serializer.maybe_encrypt_value(&obj).expect("could not serialize"); + let deserialized: TestStruct = + serializer.maybe_decrypt_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + /// Test that `maybe_encrypt_value`/`maybe_decrypt_value` will round-trip, + /// when no cipher is in use. + #[async_test] + async fn test_maybe_encrypt_decrypt_no_cipher() { + let serializer = IndexeddbSerializer::new(None); + + let obj = make_test_object(); + let serialized = serializer.maybe_encrypt_value(&obj).expect("could not serialize"); + let deserialized: TestStruct = + serializer.maybe_decrypt_value(serialized).expect("could not deserialize"); + + assert_eq!(obj, deserialized); + } + + #[derive(Serialize, Deserialize, PartialEq, Debug)] + struct TestStruct { + id: u32, + name: String, + + // A map, whose keys are not strings. This is an edge-case we previously got wrong. Maps + // are represented differently in JSON from Javascript objects, and that particularly + // matters when their keys are not strings. 
+ map: BTreeMap, + } + + fn make_test_object() -> TestStruct { + TestStruct { id: 0, name: "test".to_owned(), map: BTreeMap::from([(0, "test".to_owned())]) } + } + + /// Build a [`StoreCipher`] using a hardcoded key. + fn test_cipher() -> StoreCipher { + StoreCipher::import_with_key( + &[0u8; 32], + &[ + 130, 168, 107, 100, 102, 95, 105, 110, 102, 111, 164, 78, 111, 110, 101, 175, 99, + 105, 112, 104, 101, 114, 116, 101, 120, 116, 95, 105, 110, 102, 111, 129, 176, 67, + 104, 97, 67, 104, 97, 50, 48, 80, 111, 108, 121, 49, 51, 48, 53, 130, 165, 110, + 111, 110, 99, 101, 220, 0, 24, 13, 204, 160, 204, 133, 204, 180, 204, 224, 204, + 158, 95, 14, 94, 204, 133, 110, 3, 204, 225, 204, 174, 54, 204, 144, 204, 205, 204, + 190, 204, 155, 74, 118, 81, 87, 204, 156, 170, 99, 105, 112, 104, 101, 114, 116, + 101, 120, 116, 220, 0, 80, 204, 226, 204, 205, 58, 101, 88, 204, 141, 204, 218, 2, + 112, 204, 252, 48, 204, 169, 204, 233, 58, 4, 60, 96, 66, 22, 204, 192, 4, 4, 63, + 109, 204, 157, 204, 166, 17, 55, 85, 102, 89, 204, 145, 110, 204, 250, 39, 18, 19, + 204, 191, 204, 156, 71, 204, 142, 75, 204, 251, 204, 218, 204, 130, 204, 132, 204, + 240, 86, 204, 141, 77, 64, 204, 132, 204, 241, 204, 177, 12, 204, 224, 102, 106, 4, + 204, 141, 89, 101, 30, 45, 38, 105, 104, 204, 156, 96, 204, 203, 204, 224, 34, 125, + 204, 157, 204, 160, 38, 204, 158, 204, 155, 16, 204, 150, + ], + ) + .unwrap() + } +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs new file mode 100644 index 00000000000..69713539eb1 --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -0,0 +1,10 @@ +use indexed_db_futures::IdbDatabase; + +use super::{indexeddb_serializer::IndexeddbSerializer, IndexeddbEventCacheStoreError}; + +pub async fn open_and_upgrade_db( + name: &str, + _serializer: &IndexeddbSerializer, +) -> Result { + Ok(IdbDatabase::open(name)?.await?) 
+} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index a56b8cf42f6..9465cb27ef5 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -1,10 +1,73 @@ +use std::sync::Arc; + +use crate::event_cache_store::{ + indexeddb_serializer::IndexeddbSerializer, migrations::open_and_upgrade_db, +}; +use async_trait::async_trait; +use indexed_db_futures::IdbDatabase; use matrix::sdk::base::event_cache::store::EventCacheStore; +use matrix_sdk_base::StoreError; +use matrix_sdk_store_encryption::{Error as EncryptionError, StoreCipher}; +use tracing::debug; + +mod indexeddb_serializer; +mod migrations; + +#[derive(Debug, thiserror::Error)] +pub enum IndexeddbEventCacheStoreError { + #[error(transparent)] + Json(#[from] serde_json::Error), + #[error(transparent)] + Encryption(#[from] EncryptionError), + #[error("DomException {name} ({code}): {message}")] + DomException { name: String, message: String, code: u16 }, + #[error(transparent)] + StoreError(#[from] StoreError), + #[error("Can't migrate {name} from {old_version} to {new_version} without deleting data. 
See MigrationConflictStrategy for ways to configure.")] + MigrationConflict { name: String, old_version: u32, new_version: u32 }, +} + +impl From for IndexeddbEventCacheStoreError { + fn from(frm: web_sys::DomException) -> IndexeddbEventCacheStoreError { + IndexeddbEventCacheStoreError::DomException { + name: frm.name(), + message: frm.message(), + code: frm.code(), + } + } +} + +impl From for StoreError { + fn from(e: IndexeddbEventCacheStoreError) -> Self { + match e { + IndexeddbEventCacheStoreError::Json(e) => StoreError::Json(e), + IndexeddbEventCacheStoreError::StoreError(e) => e, + IndexeddbEventCacheStoreError::Encryption(e) => StoreError::Encryption(e), + _ => StoreError::backend(e), + } + } +} + +type Result = std::result::Result; + +/// Sometimes Migrations can't proceed without having to drop existing +/// data. This allows you to configure, how these cases should be handled. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum MigrationConflictStrategy { + /// Just drop the data, we don't care that we have to sync again + Drop, + /// Raise a [`IndexeddbStateStoreError::MigrationConflict`] error with the + /// path to the DB in question. The caller then has to take care about + /// what they want to do and try again after. + Raise, + /// Default. 
+ BackupAndDrop, +} pub struct IndexeddbEventCacheStore { name: String, pub(crate) inner: IdbDatabase, - pub(crate) meta: IdbDatabase, - pub(crate) store_cipher: Option>, + pub(crate) serializer: IndexeddbSerializer, } #[cfg(not(tarpaulin_include))] @@ -27,12 +90,17 @@ impl EventCacheStore for IndexeddbEventCacheStore {} #[derive(Debug)] pub struct IndexeddbEventCacheStoreBuilder { name: Option, - passphrase: Option, + store_cipher: Option>, + migration_conflict_strategy: MigrationConflictStrategy, } impl IndexeddbEventCacheStoreBuilder { fn new() -> Self { - Self { name: None, passphrase: None } + Self { + name: None, + store_cipher: None, + migration_conflict_strategy: MigrationConflictStrategy::BackupAndDrop, + } } pub fn name(mut self, name: String) -> Self { @@ -40,17 +108,27 @@ impl IndexeddbEventCacheStoreBuilder { self } - pub fn passphrase(mut self, passphrase: String) -> Self { - self.passphrase = Some(passphrase); + pub fn store_cipher(mut self, store_cipher: Arc) -> Self { + self.store_cipher = Some(store_cipher); + self + } + + /// The strategy to use when a merge conflict is found. + /// + /// See [`MigrationConflictStrategy`] for details. 
+ pub fn migration_conflict_strategy(mut self, value: MigrationConflictStrategy) -> Self { + self.migration_conflict_strategy = value; self } - pub async fn build(self) -> Result { + pub async fn build(self) -> Result { + // let migration_strategy = self.migration_conflict_strategy.clone(); let name = self.name.unwrap_or_else(|| "event_cache".to_owned()); - let (meta, store_cipher) = upgrade_meta_db(&meta_name, self.passphrase.as_deref()).await?; - let inner = upgrade_inner_db(&name, store_cipher.as_deref(), &meta).await?; + let serializer = IndexeddbSerializer::new(self.store_cipher); + debug!("IndexedDbEventCacheStore: opening main store {name}"); + let inner = open_and_upgrade_db(&name, &serializer).await?; - Ok(IndexeddbEventCacheStore { name, inner, meta, store_cipher }) + Ok(IndexeddbEventCacheStore { name, inner, serializer }) } } diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index 73875e5596a..36e9fa66726 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -2,6 +2,10 @@ #[cfg(feature = "state-store")] use matrix_sdk_base::store::StoreError; +#[cfg(feature = "event-cache-store")] +use matrix_sdk_store_encryption::StoreCipher; +use std::sync::Arc; +#[cfg(feature = "event-cache-store")] use thiserror::Error; #[cfg(feature = "e2e-encryption")] @@ -69,12 +73,13 @@ pub async fn open_state_store( #[cfg(feature = "event-cache-store")] pub async fn open_event_cache_store( name: &str, - passphrase: Option<&str>, + store_cipher: Option>, ) -> Result { let mut builder = IndexeddbEventCacheStore::builder().name(name.to_owned()); - if let Some(passphrase) = passphrase { - builder = builder.passphrase(passphrase.to_owned()); + if let Some(store_cipher) = store_cipher { + builder = builder.store_cipher(store_cipher.clone()); } + let event_cache_store = builder.build().await.map_err(StoreError::from)?; Ok(event_cache_store) From 3c6ca6a7493a60cfaea624c73a346e1233f73be4 Mon Sep 17 
00:00:00 2001 From: Oscar Franco Date: Fri, 31 Jan 2025 09:42:35 +0100 Subject: [PATCH 04/38] Fix deps on feature indexeddb --- crates/matrix-sdk-indexeddb/Cargo.toml | 2 +- crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 2 +- crates/matrix-sdk-indexeddb/src/lib.rs | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/Cargo.toml b/crates/matrix-sdk-indexeddb/Cargo.toml index 80426a08246..c573bfec2f5 100644 --- a/crates/matrix-sdk-indexeddb/Cargo.toml +++ b/crates/matrix-sdk-indexeddb/Cargo.toml @@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"] [features] default = ["e2e-encryption", "state-store", "event-cache-store"] state-store = ["dep:matrix-sdk-base", "growable-bloom-filter"] -event-cache-store = ["dep:matrix-sdk-base"] +event-cache-store = ["dep:matrix-sdk-base", "dep:matrix-sdk-crypto"] e2e-encryption = ["dep:matrix-sdk-crypto"] testing = ["matrix-sdk-crypto?/testing"] diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 9465cb27ef5..aa93482b236 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -5,7 +5,7 @@ use crate::event_cache_store::{ }; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; -use matrix::sdk::base::event_cache::store::EventCacheStore; +use matrix_sdk_base::event_cache::store::EventCacheStore; use matrix_sdk_base::StoreError; use matrix_sdk_store_encryption::{Error as EncryptionError, StoreCipher}; use tracing::debug; diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index 36e9fa66726..085588be774 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -21,7 +21,9 @@ mod state_store; #[cfg(feature = "e2e-encryption")] pub use crypto_store::{IndexeddbCryptoStore, IndexeddbCryptoStoreError}; #[cfg(feature = 
"event-cache-store")] -pub use event_cache_store::{IndexeddbEventCacheStore, IndexeddbEventCacheStoreBuilder}; +pub use event_cache_store::{ + IndexeddbEventCacheStore, IndexeddbEventCacheStoreBuilder, IndexeddbEventCacheStoreError, +}; #[cfg(feature = "state-store")] pub use state_store::{ IndexeddbStateStore, IndexeddbStateStoreBuilder, IndexeddbStateStoreError, From 7eb8946ab856150d96c9ff748d532a2f6fa09c94 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Fri, 31 Jan 2025 09:43:04 +0100 Subject: [PATCH 05/38] Add license text at top --- .../src/event_cache_store/mod.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index aa93482b236..2087ac1f073 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -1,4 +1,16 @@ -use std::sync::Arc; +// Copyright 2020 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
use crate::event_cache_store::{ indexeddb_serializer::IndexeddbSerializer, migrations::open_and_upgrade_db, @@ -8,6 +20,7 @@ use indexed_db_futures::IdbDatabase; use matrix_sdk_base::event_cache::store::EventCacheStore; use matrix_sdk_base::StoreError; use matrix_sdk_store_encryption::{Error as EncryptionError, StoreCipher}; +use std::sync::Arc; use tracing::debug; mod indexeddb_serializer; From 64e68ba482a1c642337125fada1f6d7fe5d7e0b0 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Fri, 31 Jan 2025 13:09:03 +0100 Subject: [PATCH 06/38] Copy try_take_leased_lock from crypto_store impl --- .../src/event_cache_store/mod.rs | 61 ++++++++++++++++++- 1 file changed, 60 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 2087ac1f073..6ba27741ca7 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -97,7 +97,66 @@ impl IndexeddbEventCacheStore { } #[async_trait] -impl EventCacheStore for IndexeddbEventCacheStore {} +impl EventCacheStore for IndexeddbEventCacheStore { + type Error = IndexeddbEventCacheStoreError; + + async fn handle_linked_chunk_updates( + &self, + room_id: &RoomId, + updates: Vec>, + ) -> Result<()> { + Ok(()) + } + + async fn try_take_leased_lock( + &self, + lease_duration_ms: u32, + key: &str, + holder: &str, + ) -> Result { + // As of 2023-06-23, the code below hasn't been tested yet. 
+ let key = JsValue::from_str(key); + let txn = + self.inner.transaction_on_one_with_mode(keys::CORE, IdbTransactionMode::Readwrite)?; + let object_store = txn.object_store(keys::CORE)?; + + #[derive(serde::Deserialize, serde::Serialize)] + struct Lease { + holder: String, + expiration_ts: u64, + } + + let now_ts: u64 = MilliSecondsSinceUnixEpoch::now().get().into(); + let expiration_ts = now_ts + lease_duration_ms as u64; + + let prev = object_store.get(&key)?.await?; + match prev { + Some(prev) => { + let lease: Lease = self.serializer.deserialize_value(prev)?; + if lease.holder == holder || lease.expiration_ts < now_ts { + object_store.put_key_val( + &key, + &self + .serializer + .serialize_value(&Lease { holder: holder.to_owned(), expiration_ts })?, + )?; + Ok(true) + } else { + Ok(false) + } + } + None => { + object_store.put_key_val( + &key, + &self + .serializer + .serialize_value(&Lease { holder: holder.to_owned(), expiration_ts })?, + )?; + Ok(true) + } + } + } +} /// Builder for [`IndexeddbEventCacheStore`] #[derive(Debug)] From 1328003a1e8425cc221825cac70a9f4632b9a109 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Fri, 31 Jan 2025 13:13:21 +0100 Subject: [PATCH 07/38] Add missing methods from EventCacheTrait --- .../src/event_cache_store/mod.rs | 147 ++++++++++++++++++ 1 file changed, 147 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 6ba27741ca7..463f3b5094c 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -15,6 +15,7 @@ use crate::event_cache_store::{ indexeddb_serializer::IndexeddbSerializer, migrations::open_and_upgrade_db, }; +use anyhow::Ok; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; use matrix_sdk_base::event_cache::store::EventCacheStore; @@ -108,6 +109,152 @@ impl EventCacheStore for IndexeddbEventCacheStore { Ok(()) } + /// Return 
all the raw components of a linked chunk, so the caller may + /// reconstruct the linked chunk later. + async fn reload_linked_chunk(&self, room_id: &RoomId) -> Result>> { + Ok(vec![]) + } + + /// Clear persisted events for all the rooms. + /// + /// This will empty and remove all the linked chunks stored previously, + /// using the above [`Self::handle_linked_chunk_updates`] methods. + async fn clear_all_rooms_chunks(&self) -> Result<()> { + Ok(()) + } + + /// Add a media file's content in the media store. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequest` of the file. + /// + /// * `content` - The content of the file. + async fn add_media_content( + &self, + request: &MediaRequestParameters, + content: Vec, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<()> { + Ok(()) + } + + /// Replaces the given media's content key with another one. + /// + /// This should be used whenever a temporary (local) MXID has been used, and + /// it must now be replaced with its actual remote counterpart (after + /// uploading some content, or creating an empty MXC URI). + /// + /// ⚠ No check is performed to ensure that the media formats are consistent, + /// i.e. it's possible to update with a thumbnail key a media that was + /// keyed as a file before. The caller is responsible of ensuring that + /// the replacement makes sense, according to their use case. + /// + /// This should not raise an error when the `from` parameter points to an + /// unknown media, and it should silently continue in this case. + /// + /// # Arguments + /// + /// * `from` - The previous `MediaRequest` of the file. + /// + /// * `to` - The new `MediaRequest` of the file. + async fn replace_media_key( + &self, + from: &MediaRequestParameters, + to: &MediaRequestParameters, + ) -> Result<()> { + Ok(()) + } + + /// Get a media file's content out of the media store. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequest` of the file. 
+ async fn get_media_content(&self, request: &MediaRequestParameters) -> Result>> { + Ok(None) + } + + /// Remove a media file's content from the media store. + /// + /// # Arguments + /// + /// * `request` - The `MediaRequest` of the file. + async fn remove_media_content(&self, request: &MediaRequestParameters) -> Result<()> { + Ok(()) + } + + /// Get a media file's content associated to an `MxcUri` from the + /// media store. + /// + /// In theory, there could be several files stored using the same URI and a + /// different `MediaFormat`. This API is meant to be used with a media file + /// that has only been stored with a single format. + /// + /// If there are several media files for a given URI in different formats, + /// this API will only return one of them. Which one is left as an + /// implementation detail. + /// + /// # Arguments + /// + /// * `uri` - The `MxcUri` of the media file. + async fn get_media_content_for_uri(&self, uri: &MxcUri) -> Result>> { + Ok(None) + } + + /// Remove all the media files' content associated to an `MxcUri` from the + /// media store. + /// + /// This should not raise an error when the `uri` parameter points to an + /// unknown media, and it should return an Ok result in this case. + /// + /// # Arguments + /// + /// * `uri` - The `MxcUri` of the media files. + async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<()> { + Ok(()) + } + + fn media_retention_policy(&self) -> MediaRetentionPolicy { + MediaRetentionPolicy::KeepForever + } + + /// Set the `MediaRetentionPolicy` to use for deciding whether to store or + /// keep media content. + /// + /// # Arguments + /// + /// * `policy` - The `MediaRetentionPolicy` to use. + async fn set_media_retention_policy(&self, policy: MediaRetentionPolicy) -> Result<()> { + Ok(()) + } + + /// Set whether the current [`MediaRetentionPolicy`] should be ignored for + /// the media. + /// + /// The change will be taken into account in the next cleanup. 
+ /// + /// # Arguments + /// + /// * `request` - The `MediaRequestParameters` of the file. + /// + /// * `ignore_policy` - Whether the current `MediaRetentionPolicy` should be + /// ignored. + async fn set_ignore_media_retention_policy( + &self, + request: &MediaRequestParameters, + ignore_policy: IgnoreMediaRetentionPolicy, + ) -> Result<()> { + Ok(()) + } + + /// Clean up the media cache with the current `MediaRetentionPolicy`. + /// + /// If there is already an ongoing cleanup, this is a noop. + async fn clean_up_media_cache(&self) -> Result<()> { + Ok(()) + } + async fn try_take_leased_lock( &self, lease_duration_ms: u32, From eebe00e70e637128e6bbe546ad101f0891712478 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 3 Feb 2025 13:51:25 +0100 Subject: [PATCH 08/38] Fix a bunch of errors, move error to its own file --- .../src/event_cache_store/error.rs | 69 ++++++ .../src/event_cache_store/migrations.rs | 8 +- .../src/event_cache_store/mod.rs | 212 +++++++++++------- 3 files changed, 201 insertions(+), 88 deletions(-) create mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs new file mode 100644 index 00000000000..1ba048e4981 --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/error.rs @@ -0,0 +1,69 @@ +use matrix_sdk_base::{event_cache::store::EventCacheStoreError, StoreError}; +use matrix_sdk_crypto::store::CryptoStoreError; +use matrix_sdk_store_encryption::Error as EncryptionError; + +#[derive(Debug, thiserror::Error)] +pub enum IndexeddbEventCacheStoreError { + #[error(transparent)] + Json(#[from] serde_json::Error), + #[error(transparent)] + Encryption(#[from] EncryptionError), + #[error("DomException {name} ({code}): {message}")] + DomException { name: String, message: String, code: u16 }, + #[error(transparent)] + StoreError(#[from] StoreError), + #[error("Can't migrate {name} 
from {old_version} to {new_version} without deleting data. See MigrationConflictStrategy for ways to configure.")] + MigrationConflict { name: String, old_version: u32, new_version: u32 }, + #[error(transparent)] + CryptoStoreError(#[from] CryptoStoreError), +} + +impl From for IndexeddbEventCacheStoreError { + fn from(frm: web_sys::DomException) -> IndexeddbEventCacheStoreError { + IndexeddbEventCacheStoreError::DomException { + name: frm.name(), + message: frm.message(), + code: frm.code(), + } + } +} + +impl From for StoreError { + fn from(e: IndexeddbEventCacheStoreError) -> Self { + match e { + IndexeddbEventCacheStoreError::Json(e) => StoreError::Json(e), + IndexeddbEventCacheStoreError::StoreError(e) => e, + IndexeddbEventCacheStoreError::Encryption(e) => StoreError::Encryption(e), + _ => StoreError::backend(e), + } + } +} + +impl From for EventCacheStoreError { + fn from(e: IndexeddbEventCacheStoreError) -> Self { + match e { + IndexeddbEventCacheStoreError::Json(e) => EventCacheStoreError::Serialization(e), + IndexeddbEventCacheStoreError::StoreError(e) => { + EventCacheStoreError::Backend(Box::new(e)) + } + IndexeddbEventCacheStoreError::Encryption(e) => EventCacheStoreError::Encryption(e), + _ => EventCacheStoreError::backend(e), + } + } +} + +impl From for CryptoStoreError { + fn from(frm: IndexeddbEventCacheStoreError) -> CryptoStoreError { + match frm { + IndexeddbEventCacheStoreError::Json(e) => CryptoStoreError::Serialization(e), + IndexeddbEventCacheStoreError::CryptoStoreError(e) => e, + _ => CryptoStoreError::backend(frm), + } + } +} + +impl From for IndexeddbEventCacheStoreError { + fn from(e: serde_wasm_bindgen::Error) -> Self { + IndexeddbEventCacheStoreError::Json(serde::de::Error::custom(e.to_string())) + } +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 69713539eb1..5cddb1bf94f 100644 --- 
a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -1,10 +1,10 @@ +use super::{indexeddb_serializer::IndexeddbSerializer, Result}; use indexed_db_futures::IdbDatabase; -use super::{indexeddb_serializer::IndexeddbSerializer, IndexeddbEventCacheStoreError}; - pub async fn open_and_upgrade_db( name: &str, _serializer: &IndexeddbSerializer, -) -> Result { - Ok(IdbDatabase::open(name)?.await?) +) -> Result { + let db = IdbDatabase::open(name)?.await?; + Ok(db) } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 463f3b5094c..766c832f6f5 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -15,54 +15,104 @@ use crate::event_cache_store::{ indexeddb_serializer::IndexeddbSerializer, migrations::open_and_upgrade_db, }; -use anyhow::Ok; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; -use matrix_sdk_base::event_cache::store::EventCacheStore; -use matrix_sdk_base::StoreError; -use matrix_sdk_store_encryption::{Error as EncryptionError, StoreCipher}; +use matrix_sdk_base::{ + event_cache::{ + store::{ + media::{ + EventCacheStoreMedia, + IgnoreMediaRetentionPolicy, + MediaRetentionPolicy, + // MediaService, + }, + EventCacheStore, + }, + Event, Gap, + }, + linked_chunk::{ + // ChunkContent, + // ChunkIdentifier, + RawChunk, + Update, + }, + media::{MediaRequestParameters, UniqueKey}, +}; +use matrix_sdk_store_encryption::StoreCipher; +use ruma::{ + // time::SystemTime, + MilliSecondsSinceUnixEpoch, + MxcUri, + RoomId, +}; use std::sync::Arc; use tracing::debug; +use wasm_bindgen::JsValue; +use web_sys::IdbTransactionMode; +mod error; mod indexeddb_serializer; mod migrations; -#[derive(Debug, thiserror::Error)] -pub enum IndexeddbEventCacheStoreError { - #[error(transparent)] - Json(#[from] 
serde_json::Error), - #[error(transparent)] - Encryption(#[from] EncryptionError), - #[error("DomException {name} ({code}): {message}")] - DomException { name: String, message: String, code: u16 }, - #[error(transparent)] - StoreError(#[from] StoreError), - #[error("Can't migrate {name} from {old_version} to {new_version} without deleting data. See MigrationConflictStrategy for ways to configure.")] - MigrationConflict { name: String, old_version: u32, new_version: u32 }, +pub use error::IndexeddbEventCacheStoreError; + +mod keys { + pub const CORE: &str = "core"; + // Entries in Key-value store + pub const MEDIA_RETENTION_POLICY: &str = "media_retention_policy"; + + // Tables + pub const LINKED_CHUNKS: &str = "linked_chunks"; + pub const MEDIA: &str = "media"; } -impl From for IndexeddbEventCacheStoreError { - fn from(frm: web_sys::DomException) -> IndexeddbEventCacheStoreError { - IndexeddbEventCacheStoreError::DomException { - name: frm.name(), - message: frm.message(), - code: frm.code(), - } - } +/// Builder for [`IndexeddbEventCacheStore`] +// #[derive(Debug)] // TODO StoreCipher cannot be derived +pub struct IndexeddbEventCacheStoreBuilder { + name: Option, + store_cipher: Option>, + migration_conflict_strategy: MigrationConflictStrategy, } -impl From for StoreError { - fn from(e: IndexeddbEventCacheStoreError) -> Self { - match e { - IndexeddbEventCacheStoreError::Json(e) => StoreError::Json(e), - IndexeddbEventCacheStoreError::StoreError(e) => e, - IndexeddbEventCacheStoreError::Encryption(e) => StoreError::Encryption(e), - _ => StoreError::backend(e), +impl IndexeddbEventCacheStoreBuilder { + fn new() -> Self { + Self { + name: None, + store_cipher: None, + migration_conflict_strategy: MigrationConflictStrategy::BackupAndDrop, } } -} -type Result = std::result::Result; + pub fn name(mut self, name: String) -> Self { + self.name = Some(name); + self + } + + pub fn store_cipher(mut self, store_cipher: Arc) -> Self { + self.store_cipher = Some(store_cipher); 
+ self + } + + /// The strategy to use when a merge conflict is found. + /// + /// See [`MigrationConflictStrategy`] for details. + pub fn migration_conflict_strategy(mut self, value: MigrationConflictStrategy) -> Self { + self.migration_conflict_strategy = value; + self + } + + pub async fn build(self) -> Result { + // let migration_strategy = self.migration_conflict_strategy.clone(); + let name = self.name.unwrap_or_else(|| "event_cache".to_owned()); + + let serializer = IndexeddbSerializer::new(self.store_cipher); + debug!("IndexedDbEventCacheStore: opening main store {name}"); + let inner = open_and_upgrade_db(&name, &serializer).await?; + + let store = IndexeddbEventCacheStore { name, inner, serializer }; + Ok(store) + } +} /// Sometimes Migrations can't proceed without having to drop existing /// data. This allows you to configure, how these cases should be handled. @@ -97,15 +147,56 @@ impl IndexeddbEventCacheStore { } } -#[async_trait] -impl EventCacheStore for IndexeddbEventCacheStore { - type Error = IndexeddbEventCacheStoreError; +type Result = std::result::Result; + +#[cfg(target_arch = "wasm32")] +macro_rules! impl_event_cache_store { + ({ $($body:tt)* }) => { + #[async_trait(?Send)] + impl EventCacheStore for IndexeddbEventCacheStore { + type Error = IndexeddbEventCacheStoreError; + + $($body)* + } + }; +} + +#[cfg(not(target_arch = "wasm32"))] +macro_rules! 
impl_event_cache_store { + ({ $($body:tt)* }) => { + impl IndexeddbEventCacheStore { + $($body)* + } + }; +} +impl_event_cache_store!({ async fn handle_linked_chunk_updates( &self, room_id: &RoomId, updates: Vec>, ) -> Result<()> { + // let hashed_room_id = self.encode_key(keys::LINKED_CHUNKS, room_id); + // let room_id = room_id.to_owned(); + // let this = self.clone(); + let tx = self + .inner + .transaction_on_one_with_mode(keys::LINKED_CHUNKS, IdbTransactionMode::Readwrite)?; + + let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + + // for update in updates { + // match update { + // Update::Insert { chunk } => { + // let chunk = self.serializer.serialize_chunk(&chunk)?; + // object_store.put_key_val(&room_id, &chunk)?; + // } + // Update::Delete { chunk_id } => { + // object_store.delete(&chunk_id)?; + // } + // } + // } + Ok(()) } @@ -303,51 +394,4 @@ impl_event_cache_store!({ } } } -} - -/// Builder for [`IndexeddbEventCacheStore`] -#[derive(Debug)] -pub struct IndexeddbEventCacheStoreBuilder { - name: Option, - store_cipher: Option>, - migration_conflict_strategy: MigrationConflictStrategy, -} - -impl IndexeddbEventCacheStoreBuilder { - fn new() -> Self { - Self { - name: None, - store_cipher: None, - migration_conflict_strategy: MigrationConflictStrategy::BackupAndDrop, - } - } - - pub fn name(mut self, name: String) -> Self { - self.name = Some(name); - self - } - - pub fn store_cipher(mut self, store_cipher: Arc) -> Self { - self.store_cipher = Some(store_cipher); - self - } - - /// The strategy to use when a merge conflict is found. - /// - /// See [`MigrationConflictStrategy`] for details. 
- pub fn migration_conflict_strategy(mut self, value: MigrationConflictStrategy) -> Self { - self.migration_conflict_strategy = value; - self - } - - pub async fn build(self) -> Result { - // let migration_strategy = self.migration_conflict_strategy.clone(); - let name = self.name.unwrap_or_else(|| "event_cache".to_owned()); - - let serializer = IndexeddbSerializer::new(self.store_cipher); - debug!("IndexedDbEventCacheStore: opening main store {name}"); - let inner = open_and_upgrade_db(&name, &serializer).await?; - - Ok(IndexeddbEventCacheStore { name, inner, serializer }) - } -} +}); From e49bda6f821d1b117c623dc9682e22337be16149 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 3 Feb 2025 14:33:59 +0100 Subject: [PATCH 09/38] Fix final errors around creating the event cache store --- .../src/event_cache_store/mod.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 766c832f6f5..b9678d8899a 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -17,11 +17,12 @@ use crate::event_cache_store::{ }; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; +use indexed_db_futures::IdbQuerySource; use matrix_sdk_base::{ event_cache::{ store::{ media::{ - EventCacheStoreMedia, + // EventCacheStoreMedia, IgnoreMediaRetentionPolicy, MediaRetentionPolicy, // MediaService, @@ -36,7 +37,10 @@ use matrix_sdk_base::{ RawChunk, Update, }, - media::{MediaRequestParameters, UniqueKey}, + media::{ + MediaRequestParameters, + // UniqueKey + }, }; use matrix_sdk_store_encryption::StoreCipher; use ruma::{ @@ -307,7 +311,9 @@ impl_event_cache_store!({ } fn media_retention_policy(&self) -> MediaRetentionPolicy { - MediaRetentionPolicy::KeepForever + // TODO on the sqlite version this has a media_service... what is that? 
+ // It seems there is a Trait EventCacheStoreMedia that might need to be implemented + MediaRetentionPolicy::default() } /// Set the `MediaRetentionPolicy` to use for deciding whether to store or From 1b1029d300ee15993e2dd018e53e18c2f4862a67 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 3 Feb 2025 17:30:44 +0100 Subject: [PATCH 10/38] First insertion into indexeddb --- .../src/event_cache_store/idb_operations.rs | 26 +++++++++ .../src/event_cache_store/migrations.rs | 42 +++++++++++++- .../src/event_cache_store/mod.rs | 56 ++++++++++++------- 3 files changed, 102 insertions(+), 22 deletions(-) create mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs new file mode 100644 index 00000000000..9def309320d --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs @@ -0,0 +1,26 @@ +use gloo_utils::format::JsValueSerdeExt; +use indexed_db_futures::idb_object_store::IdbObjectStore; +use serde::{Deserialize, Serialize}; +use wasm_bindgen::JsValue; + +#[derive(Serialize, Deserialize)] +struct Chunk { + id: String, + previous: Option, + new: u64, + next: Option, +} + +pub async fn insert_chunk( + store: IdbObjectStore<'_>, + hashed_room_id: &String, + previous: Option, + new: u64, + next: Option, +) -> Result<(), web_sys::DomException> { + let id = format!("{}-{}", hashed_room_id, new); + let chunk = Chunk { id, previous, new, next }; + let value = JsValue::from_serde(&chunk).unwrap(); + store.add_val(&value)?; + Ok(()) +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 5cddb1bf94f..a19b5afee18 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -1,10 +1,46 @@ -use 
super::{indexeddb_serializer::IndexeddbSerializer, Result}; -use indexed_db_futures::IdbDatabase; +use super::{indexeddb_serializer::IndexeddbSerializer, keys, Result}; +use indexed_db_futures::{ + idb_object_store::IdbObjectStoreParameters, + request::{IdbOpenDbRequestLike, OpenDbRequest}, + IdbDatabase, IdbKeyPath, IdbVersionChangeEvent, +}; +use wasm_bindgen::JsValue; + +const CURRENT_DB_VERSION: u32 = 1; pub async fn open_and_upgrade_db( name: &str, _serializer: &IndexeddbSerializer, ) -> Result { - let db = IdbDatabase::open(name)?.await?; + let mut db = IdbDatabase::open(name)?.await?; + + let old_version = db.version() as u32; + + if old_version == 0 { + // TODO some temporary code just to get going + // Take a look at the state_store migrations + // https://github.com/ospfranco/matrix-rust-sdk/blob/e49bda6f821d1b117c623dc9682e22337be16149/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs + db = setup_db(db, CURRENT_DB_VERSION).await?; + } + + Ok(db) +} + +async fn setup_db(db: IdbDatabase, version: u32) -> Result { + let name = db.name(); + db.close(); + + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, version)?; + db_req.set_on_upgrade_needed(Some( + move |events: &IdbVersionChangeEvent| -> Result<(), JsValue> { + let mut params = IdbObjectStoreParameters::new(); + params.key_path(Some(&IdbKeyPath::from("id"))); + events.db().create_object_store_with_params(keys::LINKED_CHUNKS, &params)?; + Ok(()) + }, + )); + + let db = db_req.await?; + Ok(db) } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index b9678d8899a..538971a3d24 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -33,14 +33,12 @@ use matrix_sdk_base::{ }, linked_chunk::{ // ChunkContent, - // ChunkIdentifier, + ChunkIdentifier, RawChunk, Update, }, - media::{ - MediaRequestParameters, - // UniqueKey - }, + 
media::MediaRequestParameters, + // UniqueKey }; use matrix_sdk_store_encryption::StoreCipher; use ruma::{ @@ -50,11 +48,12 @@ use ruma::{ RoomId, }; use std::sync::Arc; -use tracing::debug; +use tracing::{debug, trace}; use wasm_bindgen::JsValue; use web_sys::IdbTransactionMode; mod error; +mod idb_operations; mod indexeddb_serializer; mod migrations; @@ -180,8 +179,9 @@ impl_event_cache_store!({ room_id: &RoomId, updates: Vec>, ) -> Result<()> { - // let hashed_room_id = self.encode_key(keys::LINKED_CHUNKS, room_id); - // let room_id = room_id.to_owned(); + // TODO not sure if this should be a String or JsValue (which I assume is a ByteArray) + let hashed_room_id = self.serializer.encode_key_as_string(keys::LINKED_CHUNKS, room_id); + let room_id = room_id.to_owned(); // let this = self.clone(); let tx = self .inner @@ -189,17 +189,35 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - // for update in updates { - // match update { - // Update::Insert { chunk } => { - // let chunk = self.serializer.serialize_chunk(&chunk)?; - // object_store.put_key_val(&room_id, &chunk)?; - // } - // Update::Delete { chunk_id } => { - // object_store.delete(&chunk_id)?; - // } - // } - // } + for update in updates { + match update { + Update::NewItemsChunk { previous, new, next } => { + let previous = previous.as_ref().map(ChunkIdentifier::index); + let new = new.index(); + let next = next.as_ref().map(ChunkIdentifier::index); + + trace!(%room_id,"Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); + + idb_operations::insert_chunk( + object_store, + &hashed_room_id, + previous, + new, + next, + ) + .await?; + } + Update::NewGapChunk { previous, new, next, gap } => todo!(), + Update::RemoveChunk(chunk_identifier) => todo!(), + Update::PushItems { at, items } => todo!(), + Update::ReplaceItem { at, item } => todo!(), + Update::RemoveItem { at } => todo!(), + Update::DetachLastItems { at } => todo!(), + Update::StartReattachItems 
=> todo!(), + Update::EndReattachItems => todo!(), + Update::Clear => todo!(), + } + } Ok(()) } From 0f8ebf166f3acb16d6328c524e9e20e7fa41d4f1 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 4 Feb 2025 18:00:27 +0100 Subject: [PATCH 11/38] Finish implementing insert_chunk --- .../src/event_cache_store/idb_operations.rs | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs index 9def309320d..ea8a7d48a4c 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs @@ -18,9 +18,39 @@ pub async fn insert_chunk( new: u64, next: Option, ) -> Result<(), web_sys::DomException> { + // Insert new value let id = format!("{}-{}", hashed_room_id, new); let chunk = Chunk { id, previous, new, next }; let value = JsValue::from_serde(&chunk).unwrap(); store.add_val(&value)?; + + // Update previous if there + if let Some(previous) = previous { + let previous_id = format!("{}-{}", hashed_room_id, previous); + let previous_chunk: Chunk = store.get_val(&previous_id)?.unwrap(); + let updated_previous_chunk = Chunk { + id: previous_chunk.id, + previous: previous_chunk.previous, + new: previous_chunk.new, + next: Some(new), + }; + let updated_previous_value = JsValue::from_serde(&updated_previous_chunk).unwrap(); + store.put_val(&updated_previous_value)?; + } + + // update next if there + if let Some(next) = next { + let next_id = format!("{}-{}", hashed_room_id, next); + let next_chunk: Chunk = store.get_val(&next_id)?.unwrap(); + let updated_next_chunk = Chunk { + id: next_chunk.id, + previous: Some(new), + new: next_chunk.new, + next: next_chunk.next, + }; + let updated_next_value = JsValue::from_serde(&updated_next_chunk).unwrap(); + store.put_val(&updated_next_value)?; + } + Ok(()) } From ee40ddd1ccc7b99b5176a5de7afb4232b93010ae Mon 
Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 4 Feb 2025 19:06:31 +0100 Subject: [PATCH 12/38] Comment unused code, finish implementing insert_chunk --- .../src/event_cache_store/builder.rs | 64 ++++++++ .../src/event_cache_store/idb_operations.rs | 18 ++- .../event_cache_store/indexeddb_serializer.rs | 96 ++++++------ .../src/event_cache_store/migrations.rs | 15 +- .../src/event_cache_store/mod.rs | 139 ++++++------------ crates/matrix-sdk-indexeddb/src/lib.rs | 6 +- 6 files changed, 184 insertions(+), 154 deletions(-) create mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs new file mode 100644 index 00000000000..0ef08177412 --- /dev/null +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs @@ -0,0 +1,64 @@ +use super::{migrations::MigrationConflictStrategy, IndexeddbEventCacheStore, Result}; +use crate::event_cache_store::{ + indexeddb_serializer::IndexeddbSerializer, migrations::open_and_upgrade_db, +}; + +use matrix_sdk_store_encryption::StoreCipher; +use std::sync::Arc; + +/// Builder for [`IndexeddbEventCacheStore`] +// #[derive(Debug)] // TODO StoreCipher cannot be derived +pub struct IndexeddbEventCacheStoreBuilder { + name: Option, + store_cipher: Option>, + migration_conflict_strategy: MigrationConflictStrategy, +} + +impl IndexeddbEventCacheStoreBuilder { + pub fn new() -> Self { + Self { + name: None, + store_cipher: None, + migration_conflict_strategy: MigrationConflictStrategy::BackupAndDrop, + } + } + + pub fn name(mut self, name: String) -> Self { + self.name = Some(name); + self + } + + pub fn store_cipher(mut self, store_cipher: Arc) -> Self { + self.store_cipher = Some(store_cipher); + self + } + + /// The strategy to use when a merge conflict is found. + /// + /// See [`MigrationConflictStrategy`] for details. 
+ pub fn migration_conflict_strategy(mut self, value: MigrationConflictStrategy) -> Self { + self.migration_conflict_strategy = value; + self + } + + pub async fn build(self) -> Result { + // let migration_strategy = self.migration_conflict_strategy.clone(); + let name = self.name.unwrap_or_else(|| "event_cache".to_owned()); + + let serializer = IndexeddbSerializer::new(self.store_cipher); + let inner = open_and_upgrade_db(&name, &serializer).await?; + + let store = IndexeddbEventCacheStore { + // name, + inner, + serializer, + }; + Ok(store) + } +} + +impl Default for IndexeddbEventCacheStoreBuilder { + fn default() -> Self { + Self::new() + } +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs index ea8a7d48a4c..3ecc676ed84 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs @@ -1,5 +1,5 @@ use gloo_utils::format::JsValueSerdeExt; -use indexed_db_futures::idb_object_store::IdbObjectStore; +use indexed_db_futures::{idb_object_store::IdbObjectStore, IdbQuerySource}; use serde::{Deserialize, Serialize}; use wasm_bindgen::JsValue; @@ -9,30 +9,35 @@ struct Chunk { previous: Option, new: u64, next: Option, + type_str: String, } pub async fn insert_chunk( - store: IdbObjectStore<'_>, + store: &IdbObjectStore<'_>, hashed_room_id: &String, previous: Option, new: u64, next: Option, + type_str: &str, ) -> Result<(), web_sys::DomException> { // Insert new value let id = format!("{}-{}", hashed_room_id, new); - let chunk = Chunk { id, previous, new, next }; + let chunk = Chunk { id, previous, new, next, type_str: type_str.to_owned() }; let value = JsValue::from_serde(&chunk).unwrap(); store.add_val(&value)?; // Update previous if there if let Some(previous) = previous { let previous_id = format!("{}-{}", hashed_room_id, previous); - let previous_chunk: Chunk = 
store.get_val(&previous_id)?.unwrap(); + // TODO unsafe unwrap()? + let previous_chunk_js_value = store.get_owned(&previous_id)?.await?.unwrap(); + let previous_chunk: Chunk = previous_chunk_js_value.into_serde().unwrap(); let updated_previous_chunk = Chunk { id: previous_chunk.id, previous: previous_chunk.previous, new: previous_chunk.new, next: Some(new), + type_str: previous_chunk.type_str, }; let updated_previous_value = JsValue::from_serde(&updated_previous_chunk).unwrap(); store.put_val(&updated_previous_value)?; @@ -41,12 +46,15 @@ pub async fn insert_chunk( // update next if there if let Some(next) = next { let next_id = format!("{}-{}", hashed_room_id, next); - let next_chunk: Chunk = store.get_val(&next_id)?.unwrap(); + // TODO unsafe unwrap()? + let next_chunk_js_value = store.get_owned(&next_id)?.await?.unwrap(); + let next_chunk: Chunk = next_chunk_js_value.into_serde().unwrap(); let updated_next_chunk = Chunk { id: next_chunk.id, previous: Some(new), new: next_chunk.new, next: next_chunk.next, + type_str: next_chunk.type_str, }; let updated_next_value = JsValue::from_serde(&updated_next_chunk).unwrap(); store.put_val(&updated_next_value)?; diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs index 6b3c70bc0cd..57f7ae33faa 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs @@ -24,7 +24,7 @@ use matrix_sdk_crypto::CryptoStoreError; use matrix_sdk_store_encryption::{EncryptedValueBase64, StoreCipher}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use wasm_bindgen::JsValue; -use web_sys::IdbKeyRange; +// use web_sys::IdbKeyRange; use zeroize::Zeroizing; use crate::{safe_encode::SafeEncode, IndexeddbEventCacheStoreError}; @@ -64,13 +64,13 @@ impl IndexeddbSerializer { /// /// This is faster than [`Self::serialize_value`] 
and reliably gives the /// same output for the same input, making it suitable for index keys. - pub fn encode_key(&self, table_name: &str, key: T) -> JsValue - where - T: SafeEncode, - { - self.encode_key_as_string(table_name, key).into() - } - + // pub fn encode_key(&self, table_name: &str, key: T) -> JsValue + // where + // T: SafeEncode, + // { + // self.encode_key_as_string(table_name, key).into() + // } + // /// Hash the given key securely for the given tablename, using the store /// cipher. /// @@ -86,25 +86,25 @@ impl IndexeddbSerializer { } } - pub fn encode_to_range( - &self, - table_name: &str, - key: T, - ) -> Result - where - T: SafeEncode, - { - match &self.store_cipher { - Some(cipher) => key.encode_to_range_secure(table_name, cipher), - None => key.encode_to_range(), - } - .map_err(|e| IndexeddbEventCacheStoreError::DomException { - code: 0, - name: "IdbKeyRangeMakeError".to_owned(), - message: e, - }) - } - + // pub fn encode_to_range( + // &self, + // table_name: &str, + // key: T, + // ) -> Result + // where + // T: SafeEncode, + // { + // match &self.store_cipher { + // Some(cipher) => key.encode_to_range_secure(table_name, cipher), + // None => key.encode_to_range(), + // } + // .map_err(|e| IndexeddbEventCacheStoreError::DomException { + // code: 0, + // name: "IdbKeyRangeMakeError".to_owned(), + // message: e, + // }) + // } + // /// Encode the value for storage as a value in indexeddb. /// /// A thin wrapper around [`IndexeddbSerializer::maybe_encrypt_value`]: @@ -126,16 +126,16 @@ impl IndexeddbSerializer { /// Avoid using this in new code. Prefer /// [`IndexeddbSerializer::serialize_value`] or /// [`IndexeddbSerializer::maybe_encrypt_value`]. 
- pub fn serialize_value_as_bytes( - &self, - value: &impl Serialize, - ) -> Result, CryptoStoreError> { - match &self.store_cipher { - Some(cipher) => cipher.encrypt_value(value).map_err(CryptoStoreError::backend), - None => serde_json::to_vec(value).map_err(CryptoStoreError::backend), - } - } - + // pub fn serialize_value_as_bytes( + // &self, + // value: &impl Serialize, + // ) -> Result, CryptoStoreError> { + // match &self.store_cipher { + // Some(cipher) => cipher.encrypt_value(value).map_err(CryptoStoreError::backend), + // None => serde_json::to_vec(value).map_err(CryptoStoreError::backend), + // } + // } + // /// Encode an object for storage as a value in indexeddb. /// /// First serializes the object as JSON bytes. @@ -258,17 +258,17 @@ impl IndexeddbSerializer { /// Decode a value that was previously encoded with /// [`Self::serialize_value_as_bytes`] - pub fn deserialize_value_from_bytes( - &self, - value: &[u8], - ) -> Result { - if let Some(cipher) = &self.store_cipher { - cipher.decrypt_value(value).map_err(CryptoStoreError::backend) - } else { - serde_json::from_slice(value).map_err(CryptoStoreError::backend) - } - } - + // pub fn deserialize_value_from_bytes( + // &self, + // value: &[u8], + // ) -> Result { + // if let Some(cipher) = &self.store_cipher { + // cipher.decrypt_value(value).map_err(CryptoStoreError::backend) + // } else { + // serde_json::from_slice(value).map_err(CryptoStoreError::backend) + // } + // } + // /// Decode a value that was previously encoded with /// [`Self::maybe_encrypt_value`] pub fn maybe_decrypt_value( diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index a19b5afee18..9296012d418 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -8,6 +8,19 @@ use wasm_bindgen::JsValue; const CURRENT_DB_VERSION: u32 = 1; +/// data. 
This allows you to configure, how these cases should be handled. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum MigrationConflictStrategy { + /// Just drop the data, we don't care that we have to sync again + Drop, + /// Raise a [`IndexeddbStateStoreError::MigrationConflict`] error with the + /// path to the DB in question. The caller then has to take care about + /// what they want to do and try again after. + Raise, + /// Default. + BackupAndDrop, +} + pub async fn open_and_upgrade_db( name: &str, _serializer: &IndexeddbSerializer, @@ -35,7 +48,7 @@ async fn setup_db(db: IdbDatabase, version: u32) -> Result { move |events: &IdbVersionChangeEvent| -> Result<(), JsValue> { let mut params = IdbObjectStoreParameters::new(); params.key_path(Some(&IdbKeyPath::from("id"))); - events.db().create_object_store_with_params(keys::LINKED_CHUNKS, ¶ms); + events.db().create_object_store_with_params(keys::LINKED_CHUNKS, ¶ms)?; Ok(()) }, )); diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 538971a3d24..443649e19cc 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -11,10 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+mod builder; +mod error; +mod idb_operations; +mod indexeddb_serializer; +mod migrations; -use crate::event_cache_store::{ - indexeddb_serializer::IndexeddbSerializer, migrations::open_and_upgrade_db, -}; +use crate::event_cache_store::indexeddb_serializer::IndexeddbSerializer; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; use indexed_db_futures::IdbQuerySource; @@ -40,99 +43,39 @@ use matrix_sdk_base::{ media::MediaRequestParameters, // UniqueKey }; -use matrix_sdk_store_encryption::StoreCipher; + use ruma::{ // time::SystemTime, MilliSecondsSinceUnixEpoch, MxcUri, RoomId, }; -use std::sync::Arc; -use tracing::{debug, trace}; + +use tracing::trace; use wasm_bindgen::JsValue; use web_sys::IdbTransactionMode; -mod error; -mod idb_operations; -mod indexeddb_serializer; -mod migrations; - +pub use builder::IndexeddbEventCacheStoreBuilder; pub use error::IndexeddbEventCacheStoreError; mod keys { pub const CORE: &str = "core"; // Entries in Key-value store - pub const MEDIA_RETENTION_POLICY: &str = "media_retention_policy"; + // pub const MEDIA_RETENTION_POLICY: &str = "media_retention_policy"; // Tables pub const LINKED_CHUNKS: &str = "linked_chunks"; - pub const MEDIA: &str = "media"; -} - -/// Builder for [`IndexeddbEventCacheStore`] -// #[derive(Debug)] // TODO StoreCipher cannot be derived -pub struct IndexeddbEventCacheStoreBuilder { - name: Option, - store_cipher: Option>, - migration_conflict_strategy: MigrationConflictStrategy, -} - -impl IndexeddbEventCacheStoreBuilder { - fn new() -> Self { - Self { - name: None, - store_cipher: None, - migration_conflict_strategy: MigrationConflictStrategy::BackupAndDrop, - } - } - - pub fn name(mut self, name: String) -> Self { - self.name = Some(name); - self - } - - pub fn store_cipher(mut self, store_cipher: Arc) -> Self { - self.store_cipher = Some(store_cipher); - self - } - - /// The strategy to use when a merge conflict is found. - /// - /// See [`MigrationConflictStrategy`] for details. 
- pub fn migration_conflict_strategy(mut self, value: MigrationConflictStrategy) -> Self { - self.migration_conflict_strategy = value; - self - } - - pub async fn build(self) -> Result { - // let migration_strategy = self.migration_conflict_strategy.clone(); - let name = self.name.unwrap_or_else(|| "event_cache".to_owned()); - - let serializer = IndexeddbSerializer::new(self.store_cipher); - debug!("IndexedDbEventCacheStore: opening main store {name}"); - let inner = open_and_upgrade_db(&name, &serializer).await?; - - let store = IndexeddbEventCacheStore { name, inner, serializer }; - Ok(store) - } -} - -/// Sometimes Migrations can't proceed without having to drop existing -/// data. This allows you to configure, how these cases should be handled. -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum MigrationConflictStrategy { - /// Just drop the data, we don't care that we have to sync again - Drop, - /// Raise a [`IndexeddbStateStoreError::MigrationConflict`] error with the - /// path to the DB in question. The caller then has to take care about - /// what they want to do and try again after. - Raise, - /// Default. - BackupAndDrop, + // pub const MEDIA: &str = "media"; } +/// The string used to identify a chunk of type events, in the `type` field in +/// the database. +const CHUNK_TYPE_EVENT_TYPE_STRING: &str = "E"; +/// The string used to identify a chunk of type gap, in the `type` field in the +/// database. 
+// const CHUNK_TYPE_GAP_TYPE_STRING: &str = "G"; pub struct IndexeddbEventCacheStore { - name: String, + // name: String, pub(crate) inner: IdbDatabase, pub(crate) serializer: IndexeddbSerializer, } @@ -199,20 +142,21 @@ impl_event_cache_store!({ trace!(%room_id,"Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); idb_operations::insert_chunk( - object_store, + &object_store, &hashed_room_id, previous, new, next, + CHUNK_TYPE_EVENT_TYPE_STRING, ) .await?; } - Update::NewGapChunk { previous, new, next, gap } => todo!(), - Update::RemoveChunk(chunk_identifier) => todo!(), - Update::PushItems { at, items } => todo!(), - Update::ReplaceItem { at, item } => todo!(), - Update::RemoveItem { at } => todo!(), - Update::DetachLastItems { at } => todo!(), + Update::NewGapChunk { previous: _, new: _, next: _, gap: _ } => todo!(), + Update::RemoveChunk(_chunk_identifier) => todo!(), + Update::PushItems { at: _, items: _ } => todo!(), + Update::ReplaceItem { at: _, item: _ } => todo!(), + Update::RemoveItem { at: _ } => todo!(), + Update::DetachLastItems { at: _ } => todo!(), Update::StartReattachItems => todo!(), Update::EndReattachItems => todo!(), Update::Clear => todo!(), @@ -224,7 +168,7 @@ impl_event_cache_store!({ /// Return all the raw components of a linked chunk, so the caller may /// reconstruct the linked chunk later. - async fn reload_linked_chunk(&self, room_id: &RoomId) -> Result>> { + async fn reload_linked_chunk(&self, _room_id: &RoomId) -> Result>> { Ok(vec![]) } @@ -245,9 +189,9 @@ impl_event_cache_store!({ /// * `content` - The content of the file. async fn add_media_content( &self, - request: &MediaRequestParameters, - content: Vec, - ignore_policy: IgnoreMediaRetentionPolicy, + _request: &MediaRequestParameters, + _content: Vec, + _ignore_policy: IgnoreMediaRetentionPolicy, ) -> Result<()> { Ok(()) } @@ -273,8 +217,8 @@ impl_event_cache_store!({ /// * `to` - The new `MediaRequest` of the file. 
async fn replace_media_key( &self, - from: &MediaRequestParameters, - to: &MediaRequestParameters, + _from: &MediaRequestParameters, + _to: &MediaRequestParameters, ) -> Result<()> { Ok(()) } @@ -284,7 +228,10 @@ impl_event_cache_store!({ /// # Arguments /// /// * `request` - The `MediaRequest` of the file. - async fn get_media_content(&self, request: &MediaRequestParameters) -> Result>> { + async fn get_media_content( + &self, + _request: &MediaRequestParameters, + ) -> Result>> { Ok(None) } @@ -293,7 +240,7 @@ impl_event_cache_store!({ /// # Arguments /// /// * `request` - The `MediaRequest` of the file. - async fn remove_media_content(&self, request: &MediaRequestParameters) -> Result<()> { + async fn remove_media_content(&self, _request: &MediaRequestParameters) -> Result<()> { Ok(()) } @@ -311,7 +258,7 @@ impl_event_cache_store!({ /// # Arguments /// /// * `uri` - The `MxcUri` of the media file. - async fn get_media_content_for_uri(&self, uri: &MxcUri) -> Result>> { + async fn get_media_content_for_uri(&self, _uri: &MxcUri) -> Result>> { Ok(None) } @@ -324,7 +271,7 @@ impl_event_cache_store!({ /// # Arguments /// /// * `uri` - The `MxcUri` of the media files. - async fn remove_media_content_for_uri(&self, uri: &MxcUri) -> Result<()> { + async fn remove_media_content_for_uri(&self, _uri: &MxcUri) -> Result<()> { Ok(()) } @@ -340,7 +287,7 @@ impl_event_cache_store!({ /// # Arguments /// /// * `policy` - The `MediaRetentionPolicy` to use. - async fn set_media_retention_policy(&self, policy: MediaRetentionPolicy) -> Result<()> { + async fn set_media_retention_policy(&self, _policy: MediaRetentionPolicy) -> Result<()> { Ok(()) } @@ -357,8 +304,8 @@ impl_event_cache_store!({ /// ignored. 
async fn set_ignore_media_retention_policy( &self, - request: &MediaRequestParameters, - ignore_policy: IgnoreMediaRetentionPolicy, + _request: &MediaRequestParameters, + _ignore_policy: IgnoreMediaRetentionPolicy, ) -> Result<()> { Ok(()) } diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index 085588be774..c5bd39c9aca 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -21,9 +21,7 @@ mod state_store; #[cfg(feature = "e2e-encryption")] pub use crypto_store::{IndexeddbCryptoStore, IndexeddbCryptoStoreError}; #[cfg(feature = "event-cache-store")] -pub use event_cache_store::{ - IndexeddbEventCacheStore, IndexeddbEventCacheStoreBuilder, IndexeddbEventCacheStoreError, -}; +pub use event_cache_store::{IndexeddbEventCacheStore, IndexeddbEventCacheStoreError}; #[cfg(feature = "state-store")] pub use state_store::{ IndexeddbStateStore, IndexeddbStateStoreBuilder, IndexeddbStateStoreError, @@ -79,7 +77,7 @@ pub async fn open_event_cache_store( ) -> Result { let mut builder = IndexeddbEventCacheStore::builder().name(name.to_owned()); if let Some(store_cipher) = store_cipher { - builder = builder.store_cipher(store_cipher.clone()); + builder = builder.store_cipher(store_cipher); } let event_cache_store = builder.build().await.map_err(StoreError::from)?; From 73713ac19abea974cff47f4a8a16ab2cad109dc2 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 4 Feb 2025 19:12:05 +0100 Subject: [PATCH 13/38] Get rid of name param --- .../matrix-sdk-indexeddb/src/event_cache_store/builder.rs | 6 +----- crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs index 0ef08177412..d3cdfb28b71 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs +++ 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/builder.rs @@ -48,11 +48,7 @@ impl IndexeddbEventCacheStoreBuilder { let serializer = IndexeddbSerializer::new(self.store_cipher); let inner = open_and_upgrade_db(&name, &serializer).await?; - let store = IndexeddbEventCacheStore { - // name, - inner, - serializer, - }; + let store = IndexeddbEventCacheStore { inner, serializer }; Ok(store) } } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 443649e19cc..37277460a32 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -75,7 +75,6 @@ const CHUNK_TYPE_EVENT_TYPE_STRING: &str = "E"; /// database. // const CHUNK_TYPE_GAP_TYPE_STRING: &str = "G"; pub struct IndexeddbEventCacheStore { - // name: String, pub(crate) inner: IdbDatabase, pub(crate) serializer: IndexeddbSerializer, } From d3b6f87609676028f95d34366b0f80fe006c7604 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 4 Feb 2025 19:12:51 +0100 Subject: [PATCH 14/38] Insert GAP chunk --- .../src/event_cache_store/mod.rs | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 37277460a32..847f5416451 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -73,7 +73,7 @@ mod keys { const CHUNK_TYPE_EVENT_TYPE_STRING: &str = "E"; /// The string used to identify a chunk of type gap, in the `type` field in the /// database. 
-// const CHUNK_TYPE_GAP_TYPE_STRING: &str = "G"; +const CHUNK_TYPE_GAP_TYPE_STRING: &str = "G"; pub struct IndexeddbEventCacheStore { pub(crate) inner: IdbDatabase, pub(crate) serializer: IndexeddbSerializer, @@ -150,7 +150,23 @@ impl_event_cache_store!({ ) .await?; } - Update::NewGapChunk { previous: _, new: _, next: _, gap: _ } => todo!(), + Update::NewGapChunk { previous, new, next, gap } => { + let previous = previous.as_ref().map(ChunkIdentifier::index); + let new = new.index(); + let next = next.as_ref().map(ChunkIdentifier::index); + + trace!(%room_id,"Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); + + idb_operations::insert_chunk( + &object_store, + &hashed_room_id, + previous, + new, + next, + CHUNK_TYPE_GAP_TYPE_STRING, + ) + .await?; + } Update::RemoveChunk(_chunk_identifier) => todo!(), Update::PushItems { at: _, items: _ } => todo!(), Update::ReplaceItem { at: _, item: _ } => todo!(), From 85285227efb19ec2aa0f8d8829e5253e6bd286ea Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 4 Feb 2025 19:20:09 +0100 Subject: [PATCH 15/38] Tried to implement insert gap, but missing encode_value on serializer --- .../src/event_cache_store/idb_operations.rs | 9 +++++++++ crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs index 3ecc676ed84..13f92f029f8 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs @@ -62,3 +62,12 @@ pub async fn insert_chunk( Ok(()) } + +pub async fn insert_gap( + _store: &IdbObjectStore<'_>, + _hashed_room_id: &String, + _new: u64, + _prev_token: Option, +) -> Result<(), web_sys::DomException> { + Ok(()) +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 847f5416451..5530e3d9cc0 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -151,6 +151,9 @@ impl_event_cache_store!({ .await?; } Update::NewGapChunk { previous, new, next, gap } => { + let serialized = serde_json::to_vec(&gap.prev_token)?; + let prev_token = self.serializer.encode_value(serialized)?; + let previous = previous.as_ref().map(ChunkIdentifier::index); let new = new.index(); let next = next.as_ref().map(ChunkIdentifier::index); @@ -166,6 +169,9 @@ impl_event_cache_store!({ CHUNK_TYPE_GAP_TYPE_STRING, ) .await?; + + idb_operations::insert_gap(&object_store, &hashed_room_id, new, prev_token) + .await? } Update::RemoveChunk(_chunk_identifier) => todo!(), Update::PushItems { at: _, items: _ } => todo!(), From 7622be47e5efdda3fa6dd39603c8a2ecd2204944 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 6 Feb 2025 14:42:06 +0100 Subject: [PATCH 16/38] Implement insert gap --- .../src/event_cache_store/idb_operations.rs | 23 +++++++-- .../src/event_cache_store/mod.rs | 48 ++++++++++++++----- 2 files changed, 54 insertions(+), 17 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs index 13f92f029f8..1fca7b6ef1b 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs @@ -63,11 +63,24 @@ pub async fn insert_chunk( Ok(()) } -pub async fn insert_gap( - _store: &IdbObjectStore<'_>, - _hashed_room_id: &String, - _new: u64, - _prev_token: Option, +pub fn insert_gap( + store: &IdbObjectStore<'_>, + hashed_room_id: &String, + new: u64, + prev_token: &JsValue, ) -> Result<(), web_sys::DomException> { + let id = format!("{}-{}", hashed_room_id, new); + let id = JsValue::from_str(&id); + store.add_key_val(&id, 
prev_token)?; + Ok(()) } + +pub fn remove_chunk( + store: &IdbObjectStore<'_>, + hashed_room_id: &String, + id: u64, +) -> Resut<(), web_sys::DomException> { + // let id = format!("{}-{}", hashed_room_id, new); + // let id = JsValue::from_str(&id); +} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 5530e3d9cc0..78bd7cb7951 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -65,6 +65,7 @@ mod keys { // Tables pub const LINKED_CHUNKS: &str = "linked_chunks"; + pub const GAPS: &str = "gaps"; // pub const MEDIA: &str = "media"; } @@ -124,16 +125,17 @@ impl_event_cache_store!({ // TODO not sure if this should be a String or JsValue (which I assume is a ByteArray) let hashed_room_id = self.serializer.encode_key_as_string(keys::LINKED_CHUNKS, room_id); let room_id = room_id.to_owned(); - // let this = self.clone(); - let tx = self - .inner - .transaction_on_one_with_mode(keys::LINKED_CHUNKS, IdbTransactionMode::Readwrite)?; - - let object_store = tx.object_store(keys::LINKED_CHUNKS)?; for update in updates { match update { Update::NewItemsChunk { previous, new, next } => { + let tx = self.inner.transaction_on_one_with_mode( + keys::LINKED_CHUNKS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + let previous = previous.as_ref().map(ChunkIdentifier::index); let new = new.index(); let next = next.as_ref().map(ChunkIdentifier::index); @@ -151,14 +153,20 @@ impl_event_cache_store!({ .await?; } Update::NewGapChunk { previous, new, next, gap } => { - let serialized = serde_json::to_vec(&gap.prev_token)?; - let prev_token = self.serializer.encode_value(serialized)?; + let tx = self.inner.transaction_on_one_with_mode( + keys::LINKED_CHUNKS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + + let prev_token 
= self.serializer.serialize_value(&gap.prev_token)?; let previous = previous.as_ref().map(ChunkIdentifier::index); let new = new.index(); let next = next.as_ref().map(ChunkIdentifier::index); - trace!(%room_id,"Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); + trace!(%room_id,"Inserting new gap (prev={previous:?}, new={new}, next={next:?})"); idb_operations::insert_chunk( &object_store, @@ -170,10 +178,26 @@ impl_event_cache_store!({ ) .await?; - idb_operations::insert_gap(&object_store, &hashed_room_id, new, prev_token) - .await? + let tx = self + .inner + .transaction_on_one_with_mode(keys::GAPS, IdbTransactionMode::Readwrite)?; + + let object_store = tx.object_store(keys::GAPS)?; + + idb_operations::insert_gap(&object_store, &hashed_room_id, new, &prev_token)? + } + Update::RemoveChunk(id) => { + let tx = self.inner.transaction_on_one_with_mode( + keys::LINKED_CHUNKS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + + trace!("Removing chunk {id:?}"); + + idb_operations::remove_chunk(&object_store, &hashed_room_id, id); } - Update::RemoveChunk(_chunk_identifier) => todo!(), Update::PushItems { at: _, items: _ } => todo!(), Update::ReplaceItem { at: _, item: _ } => todo!(), Update::RemoveItem { at: _ } => todo!(), From 01cb5ba7e48bbafb93c8b22d47a7938785f2ed67 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 6 Feb 2025 15:07:37 +0100 Subject: [PATCH 17/38] remove chunk deletes from LINKED_CHUNKS store, missing related entities --- .../src/event_cache_store/idb_operations.rs | 44 +++++++++++++++++-- .../src/event_cache_store/mod.rs | 3 +- 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs index 1fca7b6ef1b..277169ec114 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs +++ 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs @@ -76,11 +76,47 @@ pub fn insert_gap( Ok(()) } -pub fn remove_chunk( +pub async fn remove_chunk( store: &IdbObjectStore<'_>, hashed_room_id: &String, id: u64, -) -> Resut<(), web_sys::DomException> { - // let id = format!("{}-{}", hashed_room_id, new); - // let id = JsValue::from_str(&id); +) -> Result<(), web_sys::DomException> { + let id = format!("{}-{}", hashed_room_id, id); + + // Get current value, so we can later update prev and next + let chunk_to_delete_js_value = store.get_owned(id)?.await?.unwrap(); + let chunk_to_delete: Chunk = chunk_to_delete_js_value.into_serde().unwrap(); + + // Get previous value + if let Some(previous) = chunk_to_delete.previous { + let previous_id = format!("{}-{}", hashed_room_id, previous); + let previous_chunk_js_value = store.get_owned(previous_id)?.await?.unwrap(); + let mut previous_chunk: Chunk = previous_chunk_js_value.into_serde().unwrap(); + + previous_chunk.next = chunk_to_delete.next; + + // save modified chunk + let updated_previous_value = JsValue::from_serde(&previous_chunk).unwrap(); + store.put_val(&updated_previous_value)?; + } + + // Get next value if there and update it's previous + if let Some(next) = chunk_to_delete.next { + let next_id = format!("{}-{}", hashed_room_id, next); + let next_chunk_js_value = store.get_owned(next_id)?.await?.unwrap(); + let mut next_chunk: Chunk = next_chunk_js_value.into_serde().unwrap(); + + next_chunk.previous = chunk_to_delete.previous; + + // save modified chunk + let updated_next_value = JsValue::from_serde(&next_chunk).unwrap(); + store.put_val(&updated_next_value)?; + } + + store.delete_owned(chunk_to_delete.id)?; + + // TODO on the SQLite version, there is a cascading delete that deletes related entities + // we will have to implement those manually here + + Ok(()) } diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 
78bd7cb7951..241b8b9c71d 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -196,7 +196,8 @@ impl_event_cache_store!({ trace!("Removing chunk {id:?}"); - idb_operations::remove_chunk(&object_store, &hashed_room_id, id); + idb_operations::remove_chunk(&object_store, &hashed_room_id, id.index()) + .await?; } Update::PushItems { at: _, items: _ } => todo!(), Update::ReplaceItem { at: _, item: _ } => todo!(), From 8f2b4275cb83463ddfc113bb9bd5b59a5504ca0a Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 10:35:58 +0100 Subject: [PATCH 18/38] Fix some compilation errors --- .../src/event_cache_store/mod.rs | 34 +++++++++++++++---- crates/matrix-sdk-indexeddb/src/lib.rs | 15 ++++---- crates/matrix-sdk/src/client/builder/mod.rs | 19 ++++------- 3 files changed, 41 insertions(+), 27 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 241b8b9c71d..473b9c795b1 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -60,6 +60,7 @@ pub use error::IndexeddbEventCacheStoreError; mod keys { pub const CORE: &str = "core"; + pub const EVENTS: &str = "events"; // Entries in Key-value store // pub const MEDIA_RETENTION_POLICY: &str = "media_retention_policy"; @@ -199,13 +200,32 @@ impl_event_cache_store!({ idb_operations::remove_chunk(&object_store, &hashed_room_id, id.index()) .await?; } - Update::PushItems { at: _, items: _ } => todo!(), - Update::ReplaceItem { at: _, item: _ } => todo!(), - Update::RemoveItem { at: _ } => todo!(), - Update::DetachLastItems { at: _ } => todo!(), - Update::StartReattachItems => todo!(), - Update::EndReattachItems => todo!(), - Update::Clear => todo!(), + Update::PushItems { at, items } => { + let chunk_id = at.chunk_identifier().index(); + + trace!(%room_id, "pushing {} items @ 
{chunk_id}", items.len()); + + let tx = self.inner.transaction_on_one_with_mode( + keys::EVENTS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::EVENTS)?; + + for (i, event) in items.into_iter().enumerate() { + let event_id = format!("{}-{}", chunk_id, i); + let event_id = JsValue::from_str(&event_id); + let event = self.serializer.serialize_value(&event)?; + + object_store.add_key_val(&event_id, &event)?; + } + } + Update::ReplaceItem { at: _, item: _ } => {} + Update::RemoveItem { at: _ } => {} + Update::DetachLastItems { at: _ } => {} + Update::StartReattachItems => {} + Update::EndReattachItems => {} + Update::Clear => {} } } diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index c5bd39c9aca..0d71f5e520b 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -4,8 +4,10 @@ use matrix_sdk_base::store::StoreError; #[cfg(feature = "event-cache-store")] use matrix_sdk_store_encryption::StoreCipher; -use std::sync::Arc; + #[cfg(feature = "event-cache-store")] +use std::sync::Arc; + use thiserror::Error; #[cfg(feature = "e2e-encryption")] @@ -30,11 +32,11 @@ pub use state_store::{ /// Create a [`IndexeddbStateStore`] and a [`IndexeddbCryptoStore`] that use the /// same name and passphrase. 
-#[cfg(all(feature = "e2e-encryption", feature = "state-store"))] +#[cfg(all(feature = "e2e-encryption", feature = "state-store", feature = "event-cache-store"))] pub async fn open_stores_with_name( name: &str, passphrase: Option<&str>, -) -> Result<(IndexeddbStateStore, IndexeddbCryptoStore), OpenStoreError> { +) -> Result<(IndexeddbStateStore, IndexeddbCryptoStore, IndexeddbEventCacheStore), OpenStoreError> { let mut builder = IndexeddbStateStore::builder().name(name.to_owned()); if let Some(passphrase) = passphrase { builder = builder.passphrase(passphrase.to_owned()); @@ -45,7 +47,9 @@ pub async fn open_stores_with_name( IndexeddbCryptoStore::open_with_store_cipher(name, state_store.store_cipher.clone()) .await?; - Ok((state_store, crypto_store)) + let event_cache_store = open_event_cache_store(name, state_store.store_cipher.clone()).await?; + + Ok((state_store, crypto_store, event_cache_store)) } /// Create an [`IndexeddbStateStore`]. @@ -67,9 +71,6 @@ pub async fn open_state_store( } /// Create an ['IndexeddbEventCacheStore'] -/// -/// If a `passphrase` is given, the store will be encrypted using a key derived -/// from that passphrase. 
#[cfg(feature = "event-cache-store")] pub async fn open_event_cache_store( name: &str, diff --git a/crates/matrix-sdk/src/client/builder/mod.rs b/crates/matrix-sdk/src/client/builder/mod.rs index 5a98b5e1664..eddb08a32dc 100644 --- a/crates/matrix-sdk/src/client/builder/mod.rs +++ b/crates/matrix-sdk/src/client/builder/mod.rs @@ -621,30 +621,23 @@ async fn build_indexeddb_store_config( #[cfg(feature = "e2e-encryption")] let store_config = { - let (state_store, crypto_store) = + let (state_store, crypto_store, event_cache_store) = matrix_sdk_indexeddb::open_stores_with_name(name, passphrase).await?; StoreConfig::new(cross_process_store_locks_holder_name) .state_store(state_store) .crypto_store(crypto_store) + .event_cache_store(event_cache_store) }; #[cfg(not(feature = "e2e-encryption"))] let store_config = { let state_store = matrix_sdk_indexeddb::open_state_store(name, passphrase).await?; - StoreConfig::new(cross_process_store_locks_holder_name).state_store(state_store) - }; - - let store_config = { - let event_cache_store = - matrix_sdk_indexeddb::open_event_cache_store(name, passphrase).await?; - store_config.event_cache_store(event_cache_store) + let event_cache_store = matrix_sdk_indexeddb::open_event_cache_store(name, None).await?; + StoreConfig::new(cross_process_store_locks_holder_name) + .state_store(state_store) + .event_cache_store(event_cache_store) }; - // let store_config = { - // tracing::warn!("The IndexedDB backend does not implement an event cache store, falling back to the in-memory event cache store…"); - // store_config.event_cache_store(matrix_sdk_base::event_cache::store::MemoryStore::new()) - // }; - Ok(store_config) } From 722bac2ad71d636e763377666e54a94aa376c0a1 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 10:50:25 +0100 Subject: [PATCH 19/38] PushItems --- .../matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git 
a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 473b9c795b1..095c2d82fcc 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -214,10 +214,12 @@ impl_event_cache_store!({ for (i, event) in items.into_iter().enumerate() { let event_id = format!("{}-{}", chunk_id, i); - let event_id = JsValue::from_str(&event_id); - let event = self.serializer.serialize_value(&event)?; - - object_store.add_key_val(&event_id, &event)?; + let event_id_js_value = JsValue::from_str(&event_id); + let index = at.index() + i; + // Can the ID be encrypted when inserting? + let value = serde_json::json!({ "id": event_id, "content": event, "room_id": room_id.to_string(), "position": index }); + let value = self.serializer.serialize_value(&value)?; + object_store.add_key_val(&event_id_js_value, &value)?; } } Update::ReplaceItem { at: _, item: _ } => {} From 4ca221f11009b675c8109baa926870016ad037fb Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 10:53:01 +0100 Subject: [PATCH 20/38] PushItems --- .../src/event_cache_store/mod.rs | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 095c2d82fcc..cbb22f9bd73 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -222,7 +222,32 @@ impl_event_cache_store!({ object_store.add_key_val(&event_id_js_value, &value)?; } } - Update::ReplaceItem { at: _, item: _ } => {} + Update::ReplaceItem { at, item } => { + let chunk_id = at.chunk_identifier().index(); + let index = at.index(); + + trace!(%room_id, "replacing item @ {chunk_id}:{index}"); + + let tx = self.inner.transaction_on_one_with_mode( + keys::EVENTS, + IdbTransactionMode::Readwrite, + )?; + + 
let object_store = tx.object_store(keys::EVENTS)?; + + let event_id = format!("{}-{}", chunk_id, index); + + let value = serde_json::json!({ + "id": event_id, + "content": item, + "room_id": room_id.to_string(), + "position": index + }); + + let value = self.serializer.serialize_value(&value)?; + + object_store.put_key_val(&JsValue::from_str(&event_id), &value)?; + } Update::RemoveItem { at: _ } => {} Update::DetachLastItems { at: _ } => {} Update::StartReattachItems => {} From 0e05ac8f3579222c5397a99093e6e11867b43bdb Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 10:53:49 +0100 Subject: [PATCH 21/38] RemoveItem --- .../src/event_cache_store/mod.rs | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index cbb22f9bd73..40bb9dcd5bc 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -248,7 +248,24 @@ impl_event_cache_store!({ object_store.put_key_val(&JsValue::from_str(&event_id), &value)?; } - Update::RemoveItem { at: _ } => {} + Update::RemoveItem { at } => { + let chunk_id = at.chunk_identifier().index(); + let index = at.index(); + + trace!(%room_id, "removing item @ {chunk_id}:{index}"); + + let tx = self.inner.transaction_on_one_with_mode( + keys::EVENTS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::EVENTS)?; + + let event_id = format!("{}-{}", chunk_id, index); + let event_id_js_value = JsValue::from_str(&event_id); + + object_store.delete(&event_id_js_value)?; + } Update::DetachLastItems { at: _ } => {} Update::StartReattachItems => {} Update::EndReattachItems => {} From 28647b6dddfd1cbd0144010501fbd5aba128d656 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 11:02:23 +0100 Subject: [PATCH 22/38] Half implementation of DetachLastItems --- 
.../src/event_cache_store/mod.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 40bb9dcd5bc..56c0deafac6 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -266,7 +266,23 @@ impl_event_cache_store!({ object_store.delete(&event_id_js_value)?; } - Update::DetachLastItems { at: _ } => {} + Update::DetachLastItems { at } => { + let chunk_id = at.chunk_identifier().index(); + let index = at.index(); + + trace!(%room_id, "detaching last items @ {chunk_id}:{index}"); + + let tx = self.inner.transaction_on_one_with_mode( + keys::EVENTS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::EVENTS)?; + + let event = object_store + .get_key(&JsValue::from_str(&format!("{}-{}", chunk_id, index)))? + .await?; + } Update::StartReattachItems => {} Update::EndReattachItems => {} Update::Clear => {} From eee0bad9ff6a1eee90f54fa163f9d7c6040a1984 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 11:06:03 +0100 Subject: [PATCH 23/38] Remove deletion --- crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 56c0deafac6..02806d0d6a2 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -279,9 +279,8 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::EVENTS)?; - let event = object_store - .get_key(&JsValue::from_str(&format!("{}-{}", chunk_id, index)))? 
- .await?; + // We need to remove ALL the items >= index + // But since we are on no sql, we can't do a range query } Update::StartReattachItems => {} Update::EndReattachItems => {} From 0b0e15e427d608cc6360329c087e8b7f104efa7b Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 10 Feb 2025 13:50:42 +0100 Subject: [PATCH 24/38] Add placeholder for deletion --- crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 02806d0d6a2..8552d96c2ba 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -197,8 +197,15 @@ impl_event_cache_store!({ trace!("Removing chunk {id:?}"); + // Remove the chunk itself idb_operations::remove_chunk(&object_store, &hashed_room_id, id.index()) .await?; + + // Now remove the events (if any) linked to the chunk + // What is the max events a LinkedChunk holds so I can iterate through all the possible values + // [1..MAX_LINKED_CHUNK_EVENTS].forEach(|i| { + // Do the deletion of all the events for the chunk + //}) } Update::PushItems { at, items } => { let chunk_id = at.chunk_identifier().index(); From 993283cb38cf4c05408022b7b88283500c5ef107 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 11 Feb 2025 10:05:07 +0100 Subject: [PATCH 25/38] Change previous implementations to use serializer --- .../event_cache_store/indexeddb_serializer.rs | 40 ++--- .../src/event_cache_store/mod.rs | 164 ++++++++++++++---- 2 files changed, 153 insertions(+), 51 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs index 57f7ae33faa..3e5df437eff 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs +++ 
b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs @@ -24,7 +24,7 @@ use matrix_sdk_crypto::CryptoStoreError; use matrix_sdk_store_encryption::{EncryptedValueBase64, StoreCipher}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use wasm_bindgen::JsValue; -// use web_sys::IdbKeyRange; +use web_sys::IdbKeyRange; use zeroize::Zeroizing; use crate::{safe_encode::SafeEncode, IndexeddbEventCacheStoreError}; @@ -86,25 +86,25 @@ impl IndexeddbSerializer { } } - // pub fn encode_to_range( - // &self, - // table_name: &str, - // key: T, - // ) -> Result - // where - // T: SafeEncode, - // { - // match &self.store_cipher { - // Some(cipher) => key.encode_to_range_secure(table_name, cipher), - // None => key.encode_to_range(), - // } - // .map_err(|e| IndexeddbEventCacheStoreError::DomException { - // code: 0, - // name: "IdbKeyRangeMakeError".to_owned(), - // message: e, - // }) - // } - // + pub fn encode_to_range( + &self, + table_name: &str, + key: T, + ) -> Result + where + T: SafeEncode, + { + match &self.store_cipher { + Some(cipher) => key.encode_to_range_secure(table_name, cipher), + None => key.encode_to_range(), + } + .map_err(|e| IndexeddbEventCacheStoreError::DomException { + code: 0, + name: "IdbKeyRangeMakeError".to_owned(), + message: e, + }) + } + /// Encode the value for storage as a value in indexeddb. /// /// A thin wrapper around [`IndexeddbSerializer::maybe_encrypt_value`]: diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 8552d96c2ba..e3a0a8c12f4 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -51,6 +51,8 @@ use ruma::{ RoomId, }; +use serde::Deserialize; +use serde::Serialize; use tracing::trace; use wasm_bindgen::JsValue; use web_sys::IdbTransactionMode; @@ -108,6 +110,14 @@ macro_rules! 
impl_event_cache_store { }; } +#[derive(Serialize, Deserialize)] +struct Chunk { + id: String, + previous: Option, + next: Option, + type_str: String, +} + #[cfg(not(target_arch = "wasm32"))] macro_rules! impl_state_store { ({ $($body:tt)* }) => { @@ -123,10 +133,6 @@ impl_event_cache_store!({ room_id: &RoomId, updates: Vec>, ) -> Result<()> { - // TODO not sure if this should be a String or JsValue (which I assume is a ByteArray) - let hashed_room_id = self.serializer.encode_key_as_string(keys::LINKED_CHUNKS, room_id); - let room_id = room_id.to_owned(); - for update in updates { match update { Update::NewItemsChunk { previous, new, next } => { @@ -141,17 +147,62 @@ impl_event_cache_store!({ let new = new.index(); let next = next.as_ref().map(ChunkIdentifier::index); - trace!(%room_id,"Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); + trace!(%room_id, "Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); - idb_operations::insert_chunk( - &object_store, - &hashed_room_id, + let chunk = Chunk { + id: format!("{room_id}-{new}"), previous, - new, next, - CHUNK_TYPE_EVENT_TYPE_STRING, - ) - .await?; + type_str: CHUNK_TYPE_EVENT_TYPE_STRING.to_owned(), + }; + + let serialized_value = self.serializer.serialize_value(&chunk)?; + + object_store.add_val(&serialized_value)?; + + // Update previous if there + if let Some(previous) = previous { + let previous_id = self + .serializer + .encode_key_as_string(&room_id.to_string(), previous.to_string()); + let previous_chunk_js_value = + object_store.get_owned(&previous_id)?.await?.unwrap(); + + let previous_chunk: Chunk = + self.serializer.deserialize_value(previous_chunk_js_value)?; + + let updated_previous_chunk = Chunk { + id: previous_id, + previous: previous_chunk.previous, + next: Some(new), + type_str: previous_chunk.type_str, + }; + let updated_previous_value = + self.serializer.serialize_value(&updated_previous_chunk)?; + object_store.put_val(&updated_previous_value)?; + } + + // 
update next if there + if let Some(next) = next { + let next_id = self + .serializer + .encode_key_as_string(&room_id.to_string(), next.to_string()); + // TODO unsafe unwrap()? + let next_chunk_js_value = object_store.get_owned(&next_id)?.await?.unwrap(); + let next_chunk: Chunk = + self.serializer.deserialize_value(next_chunk_js_value)?; + + let updated_next_chunk = Chunk { + id: next_chunk.id, + previous: Some(new), + next: next_chunk.next, + type_str: next_chunk.type_str, + }; + let updated_next_value = + self.serializer.serialize_value(&updated_next_chunk)?; + + object_store.put_val(&updated_next_value)?; + } } Update::NewGapChunk { previous, new, next, gap } => { let tx = self.inner.transaction_on_one_with_mode( @@ -161,7 +212,7 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - let prev_token = self.serializer.serialize_value(&gap.prev_token)?; + // let prev_token = self.serializer.serialize_value(&gap.prev_token)?; let previous = previous.as_ref().map(ChunkIdentifier::index); let new = new.index(); @@ -169,15 +220,16 @@ impl_event_cache_store!({ trace!(%room_id,"Inserting new gap (prev={previous:?}, new={new}, next={next:?})"); - idb_operations::insert_chunk( - &object_store, - &hashed_room_id, + let chunk = Chunk { + id: format!("{room_id}-{new}"), previous, - new, next, - CHUNK_TYPE_GAP_TYPE_STRING, - ) - .await?; + type_str: CHUNK_TYPE_GAP_TYPE_STRING.to_owned(), + }; + + let serialized_value = self.serializer.serialize_value(&chunk)?; + + object_store.add_val(&serialized_value)?; let tx = self .inner @@ -185,7 +237,14 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::GAPS)?; - idb_operations::insert_gap(&object_store, &hashed_room_id, new, &prev_token)? 
+ let gap = serde_json::json!({ + "id": format!("{room_id}-{new}"), + "prev_token": gap.prev_token + }); + + let serialized_gap = self.serializer.serialize_value(&gap)?; + + object_store.add_val(&serialized_gap)?; } Update::RemoveChunk(id) => { let tx = self.inner.transaction_on_one_with_mode( @@ -195,17 +254,58 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + let id = self + .serializer + .encode_key_as_string(&room_id.to_string(), &id.index().to_string()); + trace!("Removing chunk {id:?}"); // Remove the chunk itself - idb_operations::remove_chunk(&object_store, &hashed_room_id, id.index()) - .await?; - - // Now remove the events (if any) linked to the chunk - // What is the max events a LinkedChunk holds so I can iterate through all the possible values - // [1..MAX_LINKED_CHUNK_EVENTS].forEach(|i| { - // Do the deletion of all the events for the chunk - //}) + let chunk_to_delete_js_value = object_store.get_owned(id)?.await?.unwrap(); + let chunk_to_delete: Chunk = + self.serializer.deserialize_value(chunk_to_delete_js_value)?; + + if let Some(previous) = chunk_to_delete.previous { + let previous_id = self + .serializer + .encode_key_as_string(&room_id.to_string(), previous.to_string()); + let previous_chunk_js_value = + object_store.get_owned(&previous_id)?.await?.unwrap(); + let previous_chunk: Chunk = + self.serializer.deserialize_value(previous_chunk_js_value)?; + + let updated_previous_chunk = Chunk { + id: previous_id, + previous: previous_chunk.previous, + next: chunk_to_delete.next, + type_str: previous_chunk.type_str, + }; + let updated_previous_value = + self.serializer.serialize_value(&updated_previous_chunk)?; + object_store.put_val(&updated_previous_value)?; + } + + if let Some(next) = chunk_to_delete.next { + let next_id = self + .serializer + .encode_key_as_string(&room_id.to_string(), next.to_string()); + let next_chunk_js_value = object_store.get_owned(&next_id)?.await?.unwrap(); + let next_chunk: Chunk = 
+ self.serializer.deserialize_value(next_chunk_js_value)?; + + let updated_next_chunk = Chunk { + id: next_id, + previous: chunk_to_delete.previous, + next: next_chunk.next, + type_str: next_chunk.type_str, + }; + let updated_next_value = + self.serializer.serialize_value(&updated_next_chunk)?; + + object_store.put_val(&updated_next_value)?; + } + + object_store.delete_owned(id)?; } Update::PushItems { at, items } => { let chunk_id = at.chunk_identifier().index(); @@ -286,8 +386,10 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::EVENTS)?; - // We need to remove ALL the items >= index - // But since we are on no sql, we can't do a range query + let key_range = + self.serializer.encode_to_range(keys::EVENTS, chunk_id.to_string())?; + + object_store.get_all_with_key(&key_range)?.for_each(|entry| {}); } Update::StartReattachItems => {} Update::EndReattachItems => {} From d755cc0a57559128bdbd6925475734216698a3e2 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 11 Feb 2025 10:06:36 +0100 Subject: [PATCH 26/38] Change previous implementations to use serializer --- crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index e3a0a8c12f4..b000a54065d 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -320,13 +320,11 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::EVENTS)?; for (i, event) in items.into_iter().enumerate() { - let event_id = format!("{}-{}", chunk_id, i); - let event_id_js_value = JsValue::from_str(&event_id); let index = at.index() + i; // Can the ID be encrypted when inserting? 
- let value = serde_json::json!({ "id": event_id, "content": event, "room_id": room_id.to_string(), "position": index }); + let value = serde_json::json!({ "id": format!("{room_id}-{chunk_id}-{index}"), "content": event, "room_id": room_id.to_string(), "position": index }); let value = self.serializer.serialize_value(&value)?; - object_store.add_key_val(&event_id_js_value, &value)?; + object_store.add_val(&value)?; } } Update::ReplaceItem { at, item } => { From abee3abe204c13407cff2c8641f78324c4895bfb Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Tue, 11 Feb 2025 10:33:17 +0100 Subject: [PATCH 27/38] Do not iterate through get_all_with_key --- .../src/event_cache_store/mod.rs | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index b000a54065d..c9a01058eea 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -17,6 +17,8 @@ mod idb_operations; mod indexeddb_serializer; mod migrations; +use std::future::IntoFuture; + use crate::event_cache_store::indexeddb_serializer::IndexeddbSerializer; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; @@ -322,9 +324,16 @@ impl_event_cache_store!({ for (i, event) in items.into_iter().enumerate() { let index = at.index() + i; // Can the ID be encrypted when inserting? 
- let value = serde_json::json!({ "id": format!("{room_id}-{chunk_id}-{index}"), "content": event, "room_id": room_id.to_string(), "position": index }); + let value = serde_json::json!({ + "id": format!("{room_id}-{chunk_id}-{index}"), + "content": event, + "room_id": room_id.to_string(), + "position": index + }); + let value = self.serializer.serialize_value(&value)?; - object_store.add_val(&value)?; + + object_store.add_val(&value)?.into_future().await?; } } Update::ReplaceItem { at, item } => { @@ -387,7 +396,7 @@ impl_event_cache_store!({ let key_range = self.serializer.encode_to_range(keys::EVENTS, chunk_id.to_string())?; - object_store.get_all_with_key(&key_range)?.for_each(|entry| {}); + // object_store.get_all_with_key(&key_range)?.for_each(|entry| {}); } Update::StartReattachItems => {} Update::EndReattachItems => {} From 06b03957f1b0c16f01b70dabd76954d224641200 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 13 Feb 2025 09:25:49 +0100 Subject: [PATCH 28/38] Fix encoded room id as ref --- .../src/event_cache_store/idb_operations.rs | 122 ------------------ .../src/event_cache_store/mod.rs | 21 +-- 2 files changed, 11 insertions(+), 132 deletions(-) delete mode 100644 crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs deleted file mode 100644 index 277169ec114..00000000000 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/idb_operations.rs +++ /dev/null @@ -1,122 +0,0 @@ -use gloo_utils::format::JsValueSerdeExt; -use indexed_db_futures::{idb_object_store::IdbObjectStore, IdbQuerySource}; -use serde::{Deserialize, Serialize}; -use wasm_bindgen::JsValue; - -#[derive(Serialize, Deserialize)] -struct Chunk { - id: String, - previous: Option, - new: u64, - next: Option, - type_str: String, -} - -pub async fn insert_chunk( - store: &IdbObjectStore<'_>, - hashed_room_id: &String, - 
previous: Option, - new: u64, - next: Option, - type_str: &str, -) -> Result<(), web_sys::DomException> { - // Insert new value - let id = format!("{}-{}", hashed_room_id, new); - let chunk = Chunk { id, previous, new, next, type_str: type_str.to_owned() }; - let value = JsValue::from_serde(&chunk).unwrap(); - store.add_val(&value)?; - - // Update previous if there - if let Some(previous) = previous { - let previous_id = format!("{}-{}", hashed_room_id, previous); - // TODO unsafe unwrap()? - let previous_chunk_js_value = store.get_owned(&previous_id)?.await?.unwrap(); - let previous_chunk: Chunk = previous_chunk_js_value.into_serde().unwrap(); - let updated_previous_chunk = Chunk { - id: previous_chunk.id, - previous: previous_chunk.previous, - new: previous_chunk.new, - next: Some(new), - type_str: previous_chunk.type_str, - }; - let updated_previous_value = JsValue::from_serde(&updated_previous_chunk).unwrap(); - store.put_val(&updated_previous_value)?; - } - - // update next if there - if let Some(next) = next { - let next_id = format!("{}-{}", hashed_room_id, next); - // TODO unsafe unwrap()? 
- let next_chunk_js_value = store.get_owned(&next_id)?.await?.unwrap(); - let next_chunk: Chunk = next_chunk_js_value.into_serde().unwrap(); - let updated_next_chunk = Chunk { - id: next_chunk.id, - previous: Some(new), - new: next_chunk.new, - next: next_chunk.next, - type_str: next_chunk.type_str, - }; - let updated_next_value = JsValue::from_serde(&updated_next_chunk).unwrap(); - store.put_val(&updated_next_value)?; - } - - Ok(()) -} - -pub fn insert_gap( - store: &IdbObjectStore<'_>, - hashed_room_id: &String, - new: u64, - prev_token: &JsValue, -) -> Result<(), web_sys::DomException> { - let id = format!("{}-{}", hashed_room_id, new); - let id = JsValue::from_str(&id); - store.add_key_val(&id, prev_token)?; - - Ok(()) -} - -pub async fn remove_chunk( - store: &IdbObjectStore<'_>, - hashed_room_id: &String, - id: u64, -) -> Result<(), web_sys::DomException> { - let id = format!("{}-{}", hashed_room_id, id); - - // Get current value, so we can later update prev and next - let chunk_to_delete_js_value = store.get_owned(id)?.await?.unwrap(); - let chunk_to_delete: Chunk = chunk_to_delete_js_value.into_serde().unwrap(); - - // Get previous value - if let Some(previous) = chunk_to_delete.previous { - let previous_id = format!("{}-{}", hashed_room_id, previous); - let previous_chunk_js_value = store.get_owned(previous_id)?.await?.unwrap(); - let mut previous_chunk: Chunk = previous_chunk_js_value.into_serde().unwrap(); - - previous_chunk.next = chunk_to_delete.next; - - // save modified chunk - let updated_previous_value = JsValue::from_serde(&previous_chunk).unwrap(); - store.put_val(&updated_previous_value)?; - } - - // Get next value if there and update it's previous - if let Some(next) = chunk_to_delete.next { - let next_id = format!("{}-{}", hashed_room_id, next); - let next_chunk_js_value = store.get_owned(next_id)?.await?.unwrap(); - let mut next_chunk: Chunk = next_chunk_js_value.into_serde().unwrap(); - - next_chunk.previous = chunk_to_delete.previous; - - 
// save modified chunk - let updated_next_value = JsValue::from_serde(&next_chunk).unwrap(); - store.put_val(&updated_next_value)?; - } - - store.delete_owned(chunk_to_delete.id)?; - - // TODO on the SQLite version, there is a cascading delete that deletes related entities - // we will have to implement those manually here - - Ok(()) -} diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index c9a01058eea..356aba559b3 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -13,7 +13,6 @@ // limitations under the License. mod builder; mod error; -mod idb_operations; mod indexeddb_serializer; mod migrations; @@ -166,7 +165,7 @@ impl_event_cache_store!({ if let Some(previous) = previous { let previous_id = self .serializer - .encode_key_as_string(&room_id.to_string(), previous.to_string()); + .encode_key_as_string(room_id.as_ref(), previous.to_string()); let previous_chunk_js_value = object_store.get_owned(&previous_id)?.await?.unwrap(); @@ -188,7 +187,7 @@ impl_event_cache_store!({ if let Some(next) = next { let next_id = self .serializer - .encode_key_as_string(&room_id.to_string(), next.to_string()); + .encode_key_as_string(room_id.as_ref(), next.to_string()); // TODO unsafe unwrap()? 
let next_chunk_js_value = object_store.get_owned(&next_id)?.await?.unwrap(); let next_chunk: Chunk = @@ -258,19 +257,20 @@ impl_event_cache_store!({ let id = self .serializer - .encode_key_as_string(&room_id.to_string(), &id.index().to_string()); + .encode_key_as_string(room_id.as_ref(), id.index().to_string()); trace!("Removing chunk {id:?}"); // Remove the chunk itself - let chunk_to_delete_js_value = object_store.get_owned(id)?.await?.unwrap(); + let chunk_to_delete_js_value = + object_store.get_owned(id.clone())?.await?.unwrap(); let chunk_to_delete: Chunk = self.serializer.deserialize_value(chunk_to_delete_js_value)?; if let Some(previous) = chunk_to_delete.previous { let previous_id = self .serializer - .encode_key_as_string(&room_id.to_string(), previous.to_string()); + .encode_key_as_string(room_id.as_ref(), previous.to_string()); let previous_chunk_js_value = object_store.get_owned(&previous_id)?.await?.unwrap(); let previous_chunk: Chunk = @@ -290,7 +290,7 @@ impl_event_cache_store!({ if let Some(next) = chunk_to_delete.next { let next_id = self .serializer - .encode_key_as_string(&room_id.to_string(), next.to_string()); + .encode_key_as_string(room_id.as_ref(), next.to_string()); let next_chunk_js_value = object_store.get_owned(&next_id)?.await?.unwrap(); let next_chunk: Chunk = self.serializer.deserialize_value(next_chunk_js_value)?; @@ -393,10 +393,11 @@ impl_event_cache_store!({ let object_store = tx.object_store(keys::EVENTS)?; - let key_range = - self.serializer.encode_to_range(keys::EVENTS, chunk_id.to_string())?; + let key_range = self + .serializer + .encode_to_range(keys::EVENTS, format!("{room_id}-{chunk_id}"))?; - // object_store.get_all_with_key(&key_range)?.for_each(|entry| {}); + object_store.get_all_with_key(&key_range)?.await.iter().for_each(|_entry| {}); } Update::StartReattachItems => {} Update::EndReattachItems => {} From ae25b35cb80e0c44796b08923f8e60529e1bc4c2 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 13 Feb 2025 
11:13:21 +0100 Subject: [PATCH 29/38] finish DetachLastItems --- .../src/event_cache_store/mod.rs | 83 ++++++++++++------- crates/matrix-sdk/build.rs | 4 +- 2 files changed, 55 insertions(+), 32 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 356aba559b3..b3d98dacc51 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -22,6 +22,7 @@ use crate::event_cache_store::indexeddb_serializer::IndexeddbSerializer; use async_trait::async_trait; use indexed_db_futures::IdbDatabase; use indexed_db_futures::IdbQuerySource; +use matrix_sdk_base::deserialized_responses::TimelineEvent; use matrix_sdk_base::{ event_cache::{ store::{ @@ -99,18 +100,6 @@ impl IndexeddbEventCacheStore { type Result = std::result::Result; -#[cfg(target_arch = "wasm32")] -macro_rules! impl_event_cache_store { - ({ $($body:tt)* }) => { - #[async_trait(?Send)] - impl EventCacheStore for IndexeddbEventCacheStore { - type Error = IndexeddbEventCacheStoreError; - - $($body)* - } - }; -} - #[derive(Serialize, Deserialize)] struct Chunk { id: String, @@ -119,16 +108,42 @@ struct Chunk { type_str: String, } -#[cfg(not(target_arch = "wasm32"))] -macro_rules! impl_state_store { - ({ $($body:tt)* }) => { - impl IndexeddbEventCacheStore { - $($body)* - } - }; +#[derive(Serialize, Deserialize)] +struct TimelineEventForCache { + id: String, + content: TimelineEvent, + room_id: String, + position: usize, } -impl_event_cache_store!({ +// #[cfg(target_arch = "wasm32")] +// macro_rules! impl_event_cache_store { +// ({ $($body:tt)* }) => { +// #[async_trait(?Send)] +// impl EventCacheStore for IndexeddbEventCacheStore { +// type Error = IndexeddbEventCacheStoreError; + +// $($body)* +// } +// }; +// } + +// #[cfg(not(target_arch = "wasm32"))] +// macro_rules! 
impl_event_cache_store { +// ({ $($body:tt)* }) => { +// impl IndexeddbEventCacheStore { +// $($body)* +// } +// }; +// } + +// TODO We need to implement this trait only on wasm32 target +// But it kills autocomplete and inlay types in the IDE +// When things are ready to commit, should be replaced with the macro above +#[async_trait(?Send)] +impl EventCacheStore for IndexeddbEventCacheStore { + type Error = IndexeddbEventCacheStoreError; + async fn handle_linked_chunk_updates( &self, room_id: &RoomId, @@ -351,16 +366,16 @@ impl_event_cache_store!({ let event_id = format!("{}-{}", chunk_id, index); - let value = serde_json::json!({ - "id": event_id, - "content": item, - "room_id": room_id.to_string(), - "position": index - }); + let timeline_event = TimelineEventForCache { + id: event_id.clone(), + content: item, + room_id: room_id.to_string(), + position: index, + }; - let value = self.serializer.serialize_value(&value)?; + let value = self.serializer.serialize_value(&timeline_event)?; - object_store.put_key_val(&JsValue::from_str(&event_id), &value)?; + object_store.put_val(&value)?; } Update::RemoveItem { at } => { let chunk_id = at.chunk_identifier().index(); @@ -397,7 +412,15 @@ impl_event_cache_store!({ .serializer .encode_to_range(keys::EVENTS, format!("{room_id}-{chunk_id}"))?; - object_store.get_all_with_key(&key_range)?.await.iter().for_each(|_entry| {}); + let items = object_store.get_all_with_key(&key_range)?.await?; + + for item in items { + let event: TimelineEventForCache = + self.serializer.deserialize_value(item)?; + if event.position >= index { + object_store.delete(&JsValue::from_str(&event.id))?; + } + } } Update::StartReattachItems => {} Update::EndReattachItems => {} @@ -607,4 +630,4 @@ impl_event_cache_store!({ } } } -}); +} diff --git a/crates/matrix-sdk/build.rs b/crates/matrix-sdk/build.rs index b95a1276cb1..7ab6c314123 100644 --- a/crates/matrix-sdk/build.rs +++ b/crates/matrix-sdk/build.rs @@ -26,11 +26,11 @@ fn main() { let 
rustls_tls_set = env_is_set("CARGO_FEATURE_RUSTLS_TLS"); ensure( native_tls_set || rustls_tls_set, - "one of the features 'native-tls' or 'rustls-tls' must be enabled", + "one of the features 'native-tls' or 'rustls-tls' must be enabled", ); ensure( !native_tls_set || !rustls_tls_set, - "only one of the features 'native-tls' or 'rustls-tls' can be enabled", + "only one of the features 'native-tls' or 'rustls-tls' can be enabled", ); let is_wasm = env::var_os("CARGO_CFG_TARGET_ARCH").is_some_and(|arch| arch == "wasm32"); From 4dcf22aec201c1ce45935413838eb3e9fe6a40a5 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 13 Feb 2025 11:16:55 +0100 Subject: [PATCH 30/38] PushItems now uses TimelineEventForCache struct --- .../src/event_cache_store/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index b3d98dacc51..b333658f318 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -339,12 +339,12 @@ impl EventCacheStore for IndexeddbEventCacheStore { for (i, event) in items.into_iter().enumerate() { let index = at.index() + i; // Can the ID be encrypted when inserting? 
- let value = serde_json::json!({ - "id": format!("{room_id}-{chunk_id}-{index}"), - "content": event, - "room_id": room_id.to_string(), - "position": index - }); + let value = TimelineEventForCache { + id: format!("{room_id}-{chunk_id}-{index}"), + content: event, + room_id: room_id.to_string(), + position: index, + }; let value = self.serializer.serialize_value(&value)?; From 610860bd7163c34b822c1a1ed64e78765a2a492d Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 13 Feb 2025 12:16:29 +0100 Subject: [PATCH 31/38] Implement Update::Clear --- .../src/event_cache_store/mod.rs | 51 +++++++++++++++++-- 1 file changed, 48 insertions(+), 3 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index b333658f318..20d302de876 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -422,9 +422,54 @@ impl EventCacheStore for IndexeddbEventCacheStore { } } } - Update::StartReattachItems => {} - Update::EndReattachItems => {} - Update::Clear => {} + Update::StartReattachItems | Update::EndReattachItems => { + // Nothing? 
See sqlite implementation + } + Update::Clear => { + trace!(%room_id, "clearing all events"); + let linked_chunks_key_range = + self.serializer.encode_to_range(keys::LINKED_CHUNKS, room_id)?; + + let tx = self.inner.transaction_on_one_with_mode( + keys::LINKED_CHUNKS, + IdbTransactionMode::Readwrite, + )?; + + let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + + let linked_chunks = + object_store.get_all_with_key(&linked_chunks_key_range)?.await?; + + for linked_chunk in linked_chunks { + let linked_chunk: Chunk = + self.serializer.deserialize_value(linked_chunk)?; + // Delete all events for chunk + let events_key_range = self.serializer.encode_to_range( + keys::EVENTS, + format!("{}-{}", room_id, linked_chunk.id), + )?; + + let events_tx = self.inner.transaction_on_one_with_mode( + keys::EVENTS, + IdbTransactionMode::Readwrite, + )?; + + let events_object_store = events_tx.object_store(keys::EVENTS)?; + + let events = + events_object_store.get_all_with_key(&events_key_range)?.await?; + + for event in events { + let event: TimelineEventForCache = + self.serializer.deserialize_value(event)?; + let event_id = JsValue::from_str(&event.id); + events_object_store.delete(&event_id)?; + } + + let linked_chunk_id = JsValue::from_str(&linked_chunk.id); + object_store.delete(&linked_chunk_id)?; + } + } } } From ebda27765041d0612af76d1842a548e131658f7c Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 17 Feb 2025 15:00:43 +0100 Subject: [PATCH 32/38] Fixes on initialization of event-cache store --- .../src/event_cache_store/migrations.rs | 8 ++++++-- crates/matrix-sdk-indexeddb/src/lib.rs | 5 +++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 9296012d418..1d864fbc8d4 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs 
@@ -6,7 +6,7 @@ use indexed_db_futures::{ }; use wasm_bindgen::JsValue; -const CURRENT_DB_VERSION: u32 = 1; +const CURRENT_DB_VERSION: u32 = 2; /// data. This allows you to configure, how these cases should be handled. #[derive(Clone, Debug, PartialEq, Eq)] @@ -29,7 +29,7 @@ pub async fn open_and_upgrade_db( let old_version = db.version() as u32; - if old_version == 0 { + if old_version == 1 { // TODO some temporary code just to get going // Take a look at the state_store migrations // https://github.com/ospfranco/matrix-rust-sdk/blob/e49bda6f821d1b117c623dc9682e22337be16149/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs @@ -44,11 +44,15 @@ async fn setup_db(db: IdbDatabase, version: u32) -> Result { db.close(); let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&name, version)?; + db_req.set_on_upgrade_needed(Some( move |events: &IdbVersionChangeEvent| -> Result<(), JsValue> { let mut params = IdbObjectStoreParameters::new(); params.key_path(Some(&IdbKeyPath::from("id"))); events.db().create_object_store_with_params(keys::LINKED_CHUNKS, ¶ms)?; + + events.db().create_object_store_with_params(keys::EVENTS, ¶ms)?; + events.db().create_object_store_with_params(keys::GAPS, ¶ms)?; Ok(()) }, )); diff --git a/crates/matrix-sdk-indexeddb/src/lib.rs b/crates/matrix-sdk-indexeddb/src/lib.rs index 0d71f5e520b..5cdabb6d4c6 100644 --- a/crates/matrix-sdk-indexeddb/src/lib.rs +++ b/crates/matrix-sdk-indexeddb/src/lib.rs @@ -73,10 +73,11 @@ pub async fn open_state_store( /// Create an ['IndexeddbEventCacheStore'] #[cfg(feature = "event-cache-store")] pub async fn open_event_cache_store( - name: &str, + prefix: &str, store_cipher: Option>, ) -> Result { - let mut builder = IndexeddbEventCacheStore::builder().name(name.to_owned()); + let mut builder = + IndexeddbEventCacheStore::builder().name(format!("{prefix}-event-cache-store")); if let Some(store_cipher) = store_cipher { builder = builder.store_cipher(store_cipher); } From 1af016f005c4b45978abbd45c18b3750d491c5a0 
Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 17 Feb 2025 16:09:18 +0100 Subject: [PATCH 33/38] WIP, fixes for creating serializable objects --- .../event_cache_store/indexeddb_serializer.rs | 18 ++++ .../src/event_cache_store/migrations.rs | 2 +- .../src/event_cache_store/mod.rs | 89 ++++++++++--------- 3 files changed, 64 insertions(+), 45 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs index 3e5df437eff..89d49b99515 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs @@ -46,6 +46,12 @@ pub enum MaybeEncrypted { Unencrypted(String), } +#[derive(Debug, Deserialize, Serialize)] +struct WrapperObject { + id: String, + value: MaybeEncrypted, +} + impl IndexeddbSerializer { pub fn new(store_cipher: Option>) -> Self { Self { store_cipher } @@ -118,6 +124,18 @@ impl IndexeddbSerializer { Ok(serde_wasm_bindgen::to_value(&serialized)?) } + pub fn serialize_into_object( + &self, + id: &str, + value: &impl Serialize, + ) -> Result { + let serialized = self.maybe_encrypt_value(value)?; + + let res_obj = WrapperObject { id: id.to_string(), value: serialized }; + + Ok(serde_wasm_bindgen::to_value(&res_obj)?) + } + /// Encode the value for storage as a value in indexeddb. 
/// /// Returns a byte vector which is either the JSON serialisation of the diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs index 1d864fbc8d4..091ac9fd5b1 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/migrations.rs @@ -50,7 +50,7 @@ async fn setup_db(db: IdbDatabase, version: u32) -> Result { let mut params = IdbObjectStoreParameters::new(); params.key_path(Some(&IdbKeyPath::from("id"))); events.db().create_object_store_with_params(keys::LINKED_CHUNKS, ¶ms)?; - + events.db().create_object_store(keys::CORE)?; events.db().create_object_store_with_params(keys::EVENTS, ¶ms)?; events.db().create_object_store_with_params(keys::GAPS, ¶ms)?; Ok(()) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 20d302de876..cff43e716df 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -96,6 +96,11 @@ impl IndexeddbEventCacheStore { pub fn builder() -> IndexeddbEventCacheStoreBuilder { IndexeddbEventCacheStoreBuilder::new() } + + pub fn get_id(&self, room_id: &str, object_id: &str) -> String { + let id_raw = format!("{}-{}", room_id, object_id); + self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) + } } type Result = std::result::Result; @@ -103,8 +108,8 @@ type Result = std::result::Result; #[derive(Serialize, Deserialize)] struct Chunk { id: String, - previous: Option, - next: Option, + previous: Option, + next: Option, type_str: String, } @@ -150,6 +155,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { updates: Vec>, ) -> Result<()> { for update in updates { + // web_sys::console::log_1(&format!("🟦 Trying to handle update {:?}", update).into()); match update { Update::NewItemsChunk { previous, new, next } => { let tx = 
self.inner.transaction_on_one_with_mode( @@ -159,63 +165,65 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - let previous = previous.as_ref().map(ChunkIdentifier::index); + let previous = previous + .as_ref() + .map(ChunkIdentifier::index) + .map(|n| self.get_id(room_id.as_ref(), n.to_string().as_ref())); let new = new.index(); - let next = next.as_ref().map(ChunkIdentifier::index); + let next = next + .as_ref() + .map(ChunkIdentifier::index) + .map(|n| self.get_id(room_id.as_ref(), n.to_string().as_ref())); trace!(%room_id, "Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); + let id = self.get_id(room_id.as_ref(), new.to_string().as_ref()); + let chunk = Chunk { - id: format!("{room_id}-{new}"), - previous, - next, + id: id.clone(), + previous: previous.clone(), + next: next.clone(), type_str: CHUNK_TYPE_EVENT_TYPE_STRING.to_owned(), }; - let serialized_value = self.serializer.serialize_value(&chunk)?; + let serialized_value = self.serializer.serialize_into_object(&id, &chunk)?; object_store.add_val(&serialized_value)?; // Update previous if there if let Some(previous) = previous { - let previous_id = self - .serializer - .encode_key_as_string(room_id.as_ref(), previous.to_string()); let previous_chunk_js_value = - object_store.get_owned(&previous_id)?.await?.unwrap(); + object_store.get_owned(&previous)?.await?.unwrap(); let previous_chunk: Chunk = self.serializer.deserialize_value(previous_chunk_js_value)?; let updated_previous_chunk = Chunk { - id: previous_id, + id: previous.clone(), previous: previous_chunk.previous, - next: Some(new), + next: Some(id.clone()), type_str: previous_chunk.type_str, }; - let updated_previous_value = - self.serializer.serialize_value(&updated_previous_chunk)?; + let updated_previous_value = self + .serializer + .serialize_into_object(&previous, &updated_previous_chunk)?; object_store.put_val(&updated_previous_value)?; } // update next if there if 
let Some(next) = next { - let next_id = self - .serializer - .encode_key_as_string(room_id.as_ref(), next.to_string()); - // TODO unsafe unwrap()? - let next_chunk_js_value = object_store.get_owned(&next_id)?.await?.unwrap(); + let next_chunk_js_value = object_store.get_owned(&next)?.await?.unwrap(); let next_chunk: Chunk = self.serializer.deserialize_value(next_chunk_js_value)?; let updated_next_chunk = Chunk { - id: next_chunk.id, - previous: Some(new), + id: next.clone(), + previous: Some(id), next: next_chunk.next, type_str: next_chunk.type_str, }; let updated_next_value = - self.serializer.serialize_value(&updated_next_chunk)?; + self.serializer.serialize_into_object(&next, &updated_next_chunk)?; object_store.put_val(&updated_next_value)?; } @@ -238,8 +246,8 @@ impl EventCacheStore for IndexeddbEventCacheStore { let chunk = Chunk { id: format!("{room_id}-{new}"), - previous, - next, + previous: previous.map(|n| n.to_string()), + next: next.map(|n| n.to_string()), type_str: CHUNK_TYPE_GAP_TYPE_STRING.to_owned(), }; @@ -270,9 +278,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - let id = self - .serializer - .encode_key_as_string(room_id.as_ref(), id.index().to_string()); + let id = self.get_id(room_id.as_ref(), id.index().to_string().as_ref()); trace!("Removing chunk {id:?}"); @@ -282,42 +288,37 @@ impl EventCacheStore for IndexeddbEventCacheStore { let chunk_to_delete: Chunk = self.serializer.deserialize_value(chunk_to_delete_js_value)?; - if let Some(previous) = chunk_to_delete.previous { - let previous_id = self - .serializer - .encode_key_as_string(room_id.as_ref(), previous.to_string()); + if let Some(previous) = chunk_to_delete.previous.clone() { let previous_chunk_js_value = - object_store.get_owned(&previous_id)?.await?.unwrap(); + object_store.get_owned(&previous)?.await?.unwrap(); let previous_chunk: Chunk = self.serializer.deserialize_value(previous_chunk_js_value)?; let 
updated_previous_chunk = Chunk { - id: previous_id, + id: previous.clone(), previous: previous_chunk.previous, - next: chunk_to_delete.next, + next: chunk_to_delete.next.clone(), type_str: previous_chunk.type_str, }; - let updated_previous_value = - self.serializer.serialize_value(&updated_previous_chunk)?; + let updated_previous_value = self + .serializer + .serialize_into_object(&previous, &updated_previous_chunk)?; object_store.put_val(&updated_previous_value)?; } if let Some(next) = chunk_to_delete.next { - let next_id = self - .serializer - .encode_key_as_string(room_id.as_ref(), next.to_string()); - let next_chunk_js_value = object_store.get_owned(&next_id)?.await?.unwrap(); + let next_chunk_js_value = object_store.get_owned(&next)?.await?.unwrap(); let next_chunk: Chunk = self.serializer.deserialize_value(next_chunk_js_value)?; let updated_next_chunk = Chunk { - id: next_id, + id: next.clone(), previous: chunk_to_delete.previous, next: next_chunk.next, type_str: next_chunk.type_str, }; let updated_next_value = - self.serializer.serialize_value(&updated_next_chunk)?; + self.serializer.serialize_into_object(&next, &updated_next_chunk)?; object_store.put_val(&updated_next_value)?; } From 6d8b40516bd3ab84af207caf855b62a30c2cc2ce Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 17 Feb 2025 16:55:16 +0100 Subject: [PATCH 34/38] More fixes on serialization and deserialization --- .../event_cache_store/indexeddb_serializer.rs | 12 ++- .../src/event_cache_store/mod.rs | 87 +++++++++++++------ 2 files changed, 70 insertions(+), 29 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs index 89d49b99515..72b411c5419 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/indexeddb_serializer.rs @@ -131,7 +131,7 @@ impl IndexeddbSerializer { ) -> Result { let 
serialized = self.maybe_encrypt_value(value)?; - let res_obj = WrapperObject { id: id.to_string(), value: serialized }; + let res_obj = WrapperObject { id: id.to_owned(), value: serialized }; Ok(serde_wasm_bindgen::to_value(&res_obj)?) } @@ -231,6 +231,16 @@ impl IndexeddbSerializer { self.deserialize_legacy_value(value) } + pub fn deserialize_into_object( + &self, + value: JsValue, + ) -> Result { + let obj: WrapperObject = value.into_serde()?; + let deserialized: T = self.maybe_decrypt_value(obj.value)?; + + Ok(deserialized) + } + /// Decode a value that was encoded with an old version of /// `serialize_value`. /// diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index cff43e716df..3bd6219458b 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -101,6 +101,11 @@ impl IndexeddbEventCacheStore { let id_raw = format!("{}-{}", room_id, object_id); self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) } + + pub fn get_event_id(&self, room_id: &str, chunk_id: &str, index: usize) -> String { + let id_raw = format!("{}-{}", chunk_id, index); + self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) + } } type Result = std::result::Result; @@ -113,6 +118,12 @@ struct Chunk { type_str: String, } +#[derive(Serialize, Deserialize)] +struct IndexedDbGap { + id: String, + prev_token: String, +} + #[derive(Serialize, Deserialize)] struct TimelineEventForCache { id: String, @@ -155,7 +166,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { updates: Vec>, ) -> Result<()> { for update in updates { - // web_sys::console::log_1(&format!("🟦 Trying to handle update {:?}", update).into()); + web_sys::console::log_1(&format!("🟦 Trying to handle update {:?}", update).into()); match update { Update::NewItemsChunk { previous, new, next } => { let tx = self.inner.transaction_on_one_with_mode( @@ -188,15 
+199,19 @@ impl EventCacheStore for IndexeddbEventCacheStore { let serialized_value = self.serializer.serialize_into_object(&id, &chunk)?; - object_store.add_val(&serialized_value)?; + let req = object_store.put_val(&serialized_value)?; + + req.await?; // Update previous if there if let Some(previous) = previous { - let previous_chunk_js_value = - object_store.get_owned(&previous)?.await?.unwrap(); + let previous_chunk_js_value = object_store + .get_owned(&previous)? + .await? + .expect("Previous chunk not found"); let previous_chunk: Chunk = - self.serializer.deserialize_value(previous_chunk_js_value)?; + self.serializer.deserialize_into_object(previous_chunk_js_value)?; let updated_previous_chunk = Chunk { id: previous.clone(), @@ -204,9 +219,11 @@ impl EventCacheStore for IndexeddbEventCacheStore { next: Some(id.clone()), type_str: previous_chunk.type_str, }; + let updated_previous_value = self .serializer .serialize_into_object(&previous, &updated_previous_chunk)?; + object_store.put_val(&updated_previous_value)?; } @@ -214,7 +231,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { if let Some(next) = next { let next_chunk_js_value = object_store.get_owned(&next)?.await?.unwrap(); let next_chunk: Chunk = - self.serializer.deserialize_value(next_chunk_js_value)?; + self.serializer.deserialize_into_object(next_chunk_js_value)?; let updated_next_chunk = Chunk { id: next.clone(), @@ -222,6 +239,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { next: next_chunk.next, type_str: next_chunk.type_str, }; + let updated_next_value = self.serializer.serialize_into_object(&next, &updated_next_chunk)?; @@ -236,22 +254,28 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - // let prev_token = self.serializer.serialize_value(&gap.prev_token)?; - - let previous = previous.as_ref().map(ChunkIdentifier::index); + let previous = previous + .as_ref() + .map(ChunkIdentifier::index) + .map(|n| 
self.get_id(room_id.as_ref(), n.to_string().as_ref())); let new = new.index(); - let next = next.as_ref().map(ChunkIdentifier::index); + let next = next + .as_ref() + .map(ChunkIdentifier::index) + .map(|n| self.get_id(room_id.as_ref(), n.to_string().as_ref())); + + let id = self.get_id(room_id.as_ref(), new.to_string().as_ref()); trace!(%room_id,"Inserting new gap (prev={previous:?}, new={new}, next={next:?})"); let chunk = Chunk { - id: format!("{room_id}-{new}"), - previous: previous.map(|n| n.to_string()), - next: next.map(|n| n.to_string()), + id: id.clone(), + previous, + next, type_str: CHUNK_TYPE_GAP_TYPE_STRING.to_owned(), }; - let serialized_value = self.serializer.serialize_value(&chunk)?; + let serialized_value = self.serializer.serialize_into_object(&id, &chunk)?; object_store.add_val(&serialized_value)?; @@ -261,12 +285,9 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::GAPS)?; - let gap = serde_json::json!({ - "id": format!("{room_id}-{new}"), - "prev_token": gap.prev_token - }); + let gap = IndexedDbGap { id: id.clone(), prev_token: gap.prev_token }; - let serialized_gap = self.serializer.serialize_value(&gap)?; + let serialized_gap = self.serializer.serialize_into_object(&id, &gap)?; object_store.add_val(&serialized_gap)?; } @@ -286,13 +307,13 @@ impl EventCacheStore for IndexeddbEventCacheStore { let chunk_to_delete_js_value = object_store.get_owned(id.clone())?.await?.unwrap(); let chunk_to_delete: Chunk = - self.serializer.deserialize_value(chunk_to_delete_js_value)?; + self.serializer.deserialize_into_object(chunk_to_delete_js_value)?; if let Some(previous) = chunk_to_delete.previous.clone() { let previous_chunk_js_value = object_store.get_owned(&previous)?.await?.unwrap(); let previous_chunk: Chunk = - self.serializer.deserialize_value(previous_chunk_js_value)?; + self.serializer.deserialize_into_object(previous_chunk_js_value)?; let updated_previous_chunk = Chunk { id: previous.clone(), @@ -309,7 
+330,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { if let Some(next) = chunk_to_delete.next { let next_chunk_js_value = object_store.get_owned(&next)?.await?.unwrap(); let next_chunk: Chunk = - self.serializer.deserialize_value(next_chunk_js_value)?; + self.serializer.deserialize_into_object(next_chunk_js_value)?; let updated_next_chunk = Chunk { id: next.clone(), @@ -339,17 +360,22 @@ impl EventCacheStore for IndexeddbEventCacheStore { for (i, event) in items.into_iter().enumerate() { let index = at.index() + i; - // Can the ID be encrypted when inserting? + let id = self.get_event_id( + room_id.as_ref(), + chunk_id.to_string().as_ref(), + index, + ); + let value = TimelineEventForCache { - id: format!("{room_id}-{chunk_id}-{index}"), + id: id.clone(), content: event, room_id: room_id.to_string(), position: index, }; - let value = self.serializer.serialize_value(&value)?; + let value = self.serializer.serialize_into_object(&id, &value)?; - object_store.add_val(&value)?.into_future().await?; + object_store.put_val(&value)?.into_future().await?; } } Update::ReplaceItem { at, item } => { @@ -365,7 +391,11 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::EVENTS)?; - let event_id = format!("{}-{}", chunk_id, index); + let event_id = self.get_event_id( + room_id.to_string().as_ref(), + chunk_id.to_string().as_ref(), + index, + ); let timeline_event = TimelineEventForCache { id: event_id.clone(), @@ -374,7 +404,8 @@ impl EventCacheStore for IndexeddbEventCacheStore { position: index, }; - let value = self.serializer.serialize_value(&timeline_event)?; + let value = + self.serializer.serialize_into_object(&event_id, &timeline_event)?; object_store.put_val(&value)?; } From 4179750cbd19ace1331588f373054cace5accdd5 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Thu, 20 Feb 2025 13:01:02 +0100 Subject: [PATCH 35/38] Working retrieving of events --- .../src/event_cache_store/mod.rs | 193 ++++++++++++++++-- 1 file 
changed, 173 insertions(+), 20 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 3bd6219458b..1b55225eea9 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -23,6 +23,7 @@ use async_trait::async_trait; use indexed_db_futures::IdbDatabase; use indexed_db_futures::IdbQuerySource; use matrix_sdk_base::deserialized_responses::TimelineEvent; +use matrix_sdk_base::linked_chunk; use matrix_sdk_base::{ event_cache::{ store::{ @@ -46,6 +47,7 @@ use matrix_sdk_base::{ // UniqueKey }; +use ruma::events::policy::rule::room; use ruma::{ // time::SystemTime, MilliSecondsSinceUnixEpoch, @@ -57,6 +59,7 @@ use serde::Deserialize; use serde::Serialize; use tracing::trace; use wasm_bindgen::JsValue; +use web_sys::IdbKeyRange; use web_sys::IdbTransactionMode; pub use builder::IndexeddbEventCacheStoreBuilder; @@ -106,11 +109,23 @@ impl IndexeddbEventCacheStore { let id_raw = format!("{}-{}", chunk_id, index); self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) } + + pub fn get_chunk_id(&self, id: &Option) -> Option { + match id { + Some(id) => { + let mut parts = id.splitn(2, '-'); + let room_id = parts.next().unwrap().to_owned(); + let object_id = parts.next().unwrap().parse::().unwrap(); + Some(object_id) + } + None => None, + } + } } type Result = std::result::Result; -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] struct Chunk { id: String, previous: Option, @@ -118,13 +133,12 @@ struct Chunk { type_str: String, } -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] struct IndexedDbGap { - id: String, prev_token: String, } -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] struct TimelineEventForCache { id: String, content: TimelineEvent, @@ -166,7 +180,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { 
updates: Vec>, ) -> Result<()> { for update in updates { - web_sys::console::log_1(&format!("🟦 Trying to handle update {:?}", update).into()); + // web_sys::console::log_1(&format!("🟦 Trying to handle update {:?}", update).into()); match update { Update::NewItemsChunk { previous, new, next } => { let tx = self.inner.transaction_on_one_with_mode( @@ -180,15 +194,14 @@ impl EventCacheStore for IndexeddbEventCacheStore { .as_ref() .map(ChunkIdentifier::index) .map(|n| self.get_id(room_id.as_ref(), n.to_string().as_ref())); - let new = new.index(); + + let id = self.get_id(room_id.as_ref(), new.index().to_string().as_ref()); let next = next .as_ref() .map(ChunkIdentifier::index) .map(|n| self.get_id(room_id.as_ref(), n.to_string().as_ref())); - trace!(%room_id, "Inserting new chunk (prev={previous:?}, new={new}, next={next:?})"); - - let id = self.get_id(room_id.as_ref(), new.to_string().as_ref()); + trace!(%room_id, "Inserting new chunk (prev={previous:?}, new={id}, next={next:?})"); let chunk = Chunk { id: id.clone(), @@ -214,7 +227,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { self.serializer.deserialize_into_object(previous_chunk_js_value)?; let updated_previous_chunk = Chunk { - id: previous.clone(), + id: previous_chunk.id, previous: previous_chunk.previous, next: Some(id.clone()), type_str: previous_chunk.type_str, @@ -234,7 +247,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { self.serializer.deserialize_into_object(next_chunk_js_value)?; let updated_next_chunk = Chunk { - id: next.clone(), + id: next_chunk.id, previous: Some(id), next: next_chunk.next, type_str: next_chunk.type_str, @@ -258,20 +271,19 @@ impl EventCacheStore for IndexeddbEventCacheStore { .as_ref() .map(ChunkIdentifier::index) .map(|n| self.get_id(room_id.as_ref(), n.to_string().as_ref())); - let new = new.index(); + + let id = self.get_id(room_id.as_ref(), new.index().to_string().as_ref()); let next = next .as_ref() .map(ChunkIdentifier::index) .map(|n| 
self.get_id(room_id.as_ref(), n.to_string().as_ref())); - let id = self.get_id(room_id.as_ref(), new.to_string().as_ref()); - - trace!(%room_id,"Inserting new gap (prev={previous:?}, new={new}, next={next:?})"); + trace!(%room_id,"Inserting new gap (prev={previous:?}, new={id}, next={next:?})"); let chunk = Chunk { id: id.clone(), - previous, - next, + previous: previous.clone(), + next: next.clone(), type_str: CHUNK_TYPE_GAP_TYPE_STRING.to_owned(), }; @@ -279,13 +291,55 @@ impl EventCacheStore for IndexeddbEventCacheStore { object_store.add_val(&serialized_value)?; + if let Some(previous) = previous { + let previous_chunk_js_value = object_store + .get_owned(&previous)? + .await? + .expect("Previous chunk not found"); + + let previous_chunk: Chunk = + self.serializer.deserialize_into_object(previous_chunk_js_value)?; + + let updated_previous_chunk = Chunk { + id: previous_chunk.id, + previous: previous_chunk.previous, + next: Some(id.clone()), + type_str: previous_chunk.type_str, + }; + + let updated_previous_value = self + .serializer + .serialize_into_object(&previous, &updated_previous_chunk)?; + + object_store.put_val(&updated_previous_value)?; + } + + // update next if there + if let Some(next) = next { + let next_chunk_js_value = object_store.get_owned(&next)?.await?.unwrap(); + let next_chunk: Chunk = + self.serializer.deserialize_into_object(next_chunk_js_value)?; + + let updated_next_chunk = Chunk { + id: next_chunk.id, + previous: Some(id.clone()), + next: next_chunk.next, + type_str: next_chunk.type_str, + }; + + let updated_next_value = + self.serializer.serialize_into_object(&next, &updated_next_chunk)?; + + object_store.put_val(&updated_next_value)?; + } + let tx = self .inner .transaction_on_one_with_mode(keys::GAPS, IdbTransactionMode::Readwrite)?; let object_store = tx.object_store(keys::GAPS)?; - let gap = IndexedDbGap { id: id.clone(), prev_token: gap.prev_token }; + let gap = IndexedDbGap { prev_token: gap.prev_token }; let serialized_gap = 
self.serializer.serialize_into_object(&id, &gap)?; @@ -510,8 +564,107 @@ impl EventCacheStore for IndexeddbEventCacheStore { /// Return all the raw components of a linked chunk, so the caller may /// reconstruct the linked chunk later. - async fn reload_linked_chunk(&self, _room_id: &RoomId) -> Result>> { - Ok(vec![]) + async fn reload_linked_chunk(&self, room_id: &RoomId) -> Result>> { + // web_sys::console::log_1(&format!("🟦 reload_linked_chunk for room {}", room_id).into()); + let tx = self + .inner + .transaction_on_one_with_mode(keys::LINKED_CHUNKS, IdbTransactionMode::Readonly)?; + + let object_store = tx.object_store(keys::LINKED_CHUNKS)?; + + // let key_range = self.serializer.encode_to_range(room_id.as_ref(), room_id)?; + let key_range = IdbKeyRange::bound( + &JsValue::from_str(&format!("{}-", room_id)), + &JsValue::from_str(&format!("{}-\u{FFFF}", room_id)), + ) + .unwrap(); + + let linked_chunks = object_store.get_all_with_key_owned(&key_range)?.await?; + + // web_sys::console::log_1(&format!("🟦 found chunks: {}", linked_chunks.length()).into()); + + let mut raw_chunks = Vec::new(); + + for linked_chunk in linked_chunks { + let linked_chunk: Chunk = self.serializer.deserialize_into_object(linked_chunk)?; + // TODO unwrap + let chunk_id = self.get_chunk_id(&Some(linked_chunk.id.clone())).unwrap(); + let previous_chunk_id = self.get_chunk_id(&linked_chunk.previous); + let next_chunk_id = self.get_chunk_id(&linked_chunk.next); + + if (linked_chunk.type_str == CHUNK_TYPE_GAP_TYPE_STRING) { + let gaps_tx = self + .inner + .transaction_on_one_with_mode(keys::GAPS, IdbTransactionMode::Readonly)?; + + let gaps_object_store = gaps_tx.object_store(keys::GAPS)?; + + let gap_id = linked_chunk.id; + // web_sys::console::log_1(&format!("🟦 Trying to get gap {:?}", gap_id).into()); + let gap_id_js_value = JsValue::from_str(&gap_id); + let gap_js_value = gaps_object_store.get(&gap_id_js_value)?.await?; + // web_sys::console::log_1(&format!("🟦 got gap {:?}", 
gap_js_value).into()); + let gap: IndexedDbGap = + self.serializer.deserialize_into_object(gap_js_value.unwrap())?; + // web_sys::console::log_1(&format!("🟦 deserializing gap {:?}", gap).into()); + + let gap = Gap { prev_token: gap.prev_token }; + + let raw_chunk = RawChunk { + identifier: ChunkIdentifier::new(chunk_id), + content: linked_chunk::ChunkContent::Gap(gap), + previous: previous_chunk_id.map(ChunkIdentifier::new), + next: next_chunk_id.map(ChunkIdentifier::new), + }; + + // web_sys::console::log_1(&format!("🟩 pushing gap chunk {:?}", raw_chunk).into()); + raw_chunks.push(raw_chunk); + } else { + let events_tx = self + .inner + .transaction_on_one_with_mode(keys::EVENTS, IdbTransactionMode::Readonly)?; + + let events_object_store = events_tx.object_store(keys::EVENTS)?; + + // let events_key_range = + // self.serializer.encode_to_range(room_id.as_ref(), linked_chunk.id)?; + let events_key_range = IdbKeyRange::bound( + &JsValue::from_str(&format!("{}-", chunk_id)), + &JsValue::from_str(&format!("{}-\u{FFFF}", chunk_id)), + ) + .unwrap(); + + let events = events_object_store.get_all_with_key(&events_key_range)?.await?; + // web_sys::console::log_1( + // &format!("🟦 Found events for chunk {:?}", events.length()).into(), + // ); + let mut events_vec = Vec::new(); + + for event in events { + // web_sys::console::log_1(&format!("🟦 deserializing {:?}", event).into()); + let event: TimelineEventForCache = + self.serializer.deserialize_into_object(event)?; + // web_sys::console::log_1(&format!("🟦 Event for chunk {:?}", event).into()); + events_vec.push(event.content); + } + + let raw_chunk = RawChunk { + identifier: ChunkIdentifier::new(chunk_id), + content: linked_chunk::ChunkContent::Items(events_vec), + previous: previous_chunk_id.map(ChunkIdentifier::new), + next: next_chunk_id.map(ChunkIdentifier::new), + }; + + // web_sys::console::log_1(&format!("🟩 pushing event chunk {:?}", raw_chunk).into()); + raw_chunks.push(raw_chunk); + } + } + + // 
web_sys::console::log_1( + // &format!("🟦 Returning reconstructed chunks {:?}", raw_chunks).into(), + // ); + + Ok(raw_chunks) } /// Clear persisted events for all the rooms. From 6c7b729d8337bd75bbcbaf1ada65a7c4141aec43 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Fri, 21 Feb 2025 13:59:07 +0100 Subject: [PATCH 36/38] Update key handling --- .../src/event_cache_store/mod.rs | 32 +++++++------------ 1 file changed, 11 insertions(+), 21 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 1b55225eea9..b23e494c7bd 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -77,6 +77,7 @@ mod keys { // pub const MEDIA: &str = "media"; } +pub const KEY_SEPARATOR: &str = "\u{001D}"; /// The string used to identify a chunk of type events, in the `type` field in /// the database. const CHUNK_TYPE_EVENT_TYPE_STRING: &str = "E"; @@ -101,19 +102,19 @@ impl IndexeddbEventCacheStore { } pub fn get_id(&self, room_id: &str, object_id: &str) -> String { - let id_raw = format!("{}-{}", room_id, object_id); + let id_raw = [room_id, KEY_SEPARATOR, object_id].concat(); self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) } pub fn get_event_id(&self, room_id: &str, chunk_id: &str, index: usize) -> String { - let id_raw = format!("{}-{}", chunk_id, index); + let id_raw = [chunk_id, KEY_SEPARATOR, &index.to_string()].concat(); self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) } pub fn get_chunk_id(&self, id: &Option) -> Option { match id { Some(id) => { - let mut parts = id.splitn(2, '-'); + let mut parts = id.splitn(2, KEY_SEPARATOR); let room_id = parts.next().unwrap().to_owned(); let object_id = parts.next().unwrap().parse::().unwrap(); Some(object_id) @@ -476,7 +477,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::EVENTS)?; - let event_id 
= format!("{}-{}", chunk_id, index); + let event_id = format!("{}{}{}", chunk_id, KEY_SEPARATOR, index); let event_id_js_value = JsValue::from_str(&event_id); object_store.delete(&event_id_js_value)?; @@ -532,7 +533,7 @@ impl EventCacheStore for IndexeddbEventCacheStore { // Delete all events for chunk let events_key_range = self.serializer.encode_to_range( keys::EVENTS, - format!("{}-{}", room_id, linked_chunk.id), + format!("{}{}{}", room_id, KEY_SEPARATOR, linked_chunk.id), )?; let events_tx = self.inner.transaction_on_one_with_mode( @@ -565,23 +566,17 @@ impl EventCacheStore for IndexeddbEventCacheStore { /// Return all the raw components of a linked chunk, so the caller may /// reconstruct the linked chunk later. async fn reload_linked_chunk(&self, room_id: &RoomId) -> Result>> { - // web_sys::console::log_1(&format!("🟦 reload_linked_chunk for room {}", room_id).into()); let tx = self .inner .transaction_on_one_with_mode(keys::LINKED_CHUNKS, IdbTransactionMode::Readonly)?; let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - // let key_range = self.serializer.encode_to_range(room_id.as_ref(), room_id)?; - let key_range = IdbKeyRange::bound( - &JsValue::from_str(&format!("{}-", room_id)), - &JsValue::from_str(&format!("{}-\u{FFFF}", room_id)), - ) - .unwrap(); + let key_range = IdbKeyRange::lower_bound(&JsValue::from_str(&room_id.as_str())).unwrap(); - let linked_chunks = object_store.get_all_with_key_owned(&key_range)?.await?; + let linked_chunks = object_store.get_all_with_key_owned(key_range)?.await?; - // web_sys::console::log_1(&format!("🟦 found chunks: {}", linked_chunks.length()).into()); + web_sys::console::log_1(&format!("🟦 found chunks: {}", linked_chunks.length()).into()); let mut raw_chunks = Vec::new(); @@ -626,13 +621,8 @@ impl EventCacheStore for IndexeddbEventCacheStore { let events_object_store = events_tx.object_store(keys::EVENTS)?; - // let events_key_range = - // self.serializer.encode_to_range(room_id.as_ref(), linked_chunk.id)?; - 
let events_key_range = IdbKeyRange::bound( - &JsValue::from_str(&format!("{}-", chunk_id)), - &JsValue::from_str(&format!("{}-\u{FFFF}", chunk_id)), - ) - .unwrap(); + let events_key_range = + IdbKeyRange::lower_bound(&JsValue::from_str(&chunk_id.to_string())).unwrap(); let events = events_object_store.get_all_with_key(&events_key_range)?.await?; // web_sys::console::log_1( From c686f9015f748315024076229ff79f2e61685813 Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Fri, 21 Feb 2025 15:25:15 +0100 Subject: [PATCH 37/38] Fix ranges --- .../src/event_cache_store/mod.rs | 36 +++++++++++++------ 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index b23e494c7bd..5859a30e982 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -107,7 +107,7 @@ impl IndexeddbEventCacheStore { } pub fn get_event_id(&self, room_id: &str, chunk_id: &str, index: usize) -> String { - let id_raw = [chunk_id, KEY_SEPARATOR, &index.to_string()].concat(); + let id_raw = [room_id, KEY_SEPARATOR, chunk_id, KEY_SEPARATOR, &index.to_string()].concat(); self.serializer.encode_key_as_string(room_id.as_ref(), id_raw) } @@ -317,7 +317,8 @@ impl EventCacheStore for IndexeddbEventCacheStore { // update next if there if let Some(next) = next { - let next_chunk_js_value = object_store.get_owned(&next)?.await?.unwrap(); + let next_chunk_js_value = + object_store.get_owned(&next)?.await?.expect("Next chunk not found"); let next_chunk: Chunk = self.serializer.deserialize_into_object(next_chunk_js_value)?; @@ -495,9 +496,9 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::EVENTS)?; - let key_range = self - .serializer - .encode_to_range(keys::EVENTS, format!("{room_id}-{chunk_id}"))?; + let key_range = + 
IdbKeyRange::lower_bound(&JsValue::from_str(&chunk_id.to_string())) + .unwrap(); let items = object_store.get_all_with_key(&key_range)?.await?; @@ -572,11 +573,17 @@ impl EventCacheStore for IndexeddbEventCacheStore { let object_store = tx.object_store(keys::LINKED_CHUNKS)?; - let key_range = IdbKeyRange::lower_bound(&JsValue::from_str(&room_id.as_str())).unwrap(); + let lower = JsValue::from_str(&room_id.as_ref()); + let upper = JsValue::from_str(&(room_id.to_string() + "\u{FFFF}")); // Ensure all keys start with room_id + + let key_range = IdbKeyRange::bound(&lower, &upper).unwrap(); let linked_chunks = object_store.get_all_with_key_owned(key_range)?.await?; - web_sys::console::log_1(&format!("🟦 found chunks: {}", linked_chunks.length()).into()); + // web_sys::console::log_1( + // &format!("🟦 found chunks: {} for room: {}", linked_chunks.length(), room_id.as_str()) + // .into(), + // ); let mut raw_chunks = Vec::new(); @@ -597,8 +604,9 @@ impl EventCacheStore for IndexeddbEventCacheStore { let gap_id = linked_chunk.id; // web_sys::console::log_1(&format!("🟦 Trying to get gap {:?}", gap_id).into()); let gap_id_js_value = JsValue::from_str(&gap_id); - let gap_js_value = gaps_object_store.get(&gap_id_js_value)?.await?; + let gap_js_value = gaps_object_store.get_owned(&gap_id_js_value)?.await?; // web_sys::console::log_1(&format!("🟦 got gap {:?}", gap_js_value).into()); + let gap: IndexedDbGap = self.serializer.deserialize_into_object(gap_js_value.unwrap())?; // web_sys::console::log_1(&format!("🟦 deserializing gap {:?}", gap).into()); @@ -621,8 +629,16 @@ impl EventCacheStore for IndexeddbEventCacheStore { let events_object_store = events_tx.object_store(keys::EVENTS)?; - let events_key_range = - IdbKeyRange::lower_bound(&JsValue::from_str(&chunk_id.to_string())).unwrap(); + // let events_key_range = + // IdbKeyRange::lower_bound(&JsValue::from_str(&chunk_id.to_string())).unwrap(); + + let lower = + JsValue::from_str(&self.get_id(room_id.as_ref(), 
&chunk_id.to_string())); + let upper = JsValue::from_str( + &(self.get_id(room_id.as_ref(), &chunk_id.to_string()) + "\u{FFFF}"), + ); // Ensure all keys start with room_id + + let events_key_range = IdbKeyRange::bound(&lower, &upper).unwrap(); let events = events_object_store.get_all_with_key(&events_key_range)?.await?; // web_sys::console::log_1( From 8cfe19cbb02e85712d746e15e01e43522bf3957b Mon Sep 17 00:00:00 2001 From: Oscar Franco Date: Mon, 24 Feb 2025 11:46:13 +0100 Subject: [PATCH 38/38] Add stub for tests --- .../src/event_cache_store/mod.rs | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs index 5859a30e982..15b460c5006 100644 --- a/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/event_cache_store/mod.rs @@ -867,3 +867,43 @@ impl EventCacheStore for IndexeddbEventCacheStore { } } } + +#[cfg(test)] +mod tests { + use std::{ + sync::atomic::{AtomicU32, Ordering::SeqCst}, + time::Duration, + }; + + use assert_matches::assert_matches; + use matrix_sdk_base::{ + event_cache::{ + store::{ + integration_tests::{check_test_event, make_test_event}, + media::IgnoreMediaRetentionPolicy, + EventCacheStore, EventCacheStoreError, + }, + Gap, + }, + event_cache_store_integration_tests, event_cache_store_integration_tests_time, + event_cache_store_media_integration_tests, + linked_chunk::{ChunkContent, ChunkIdentifier, Position, Update}, + media::{MediaFormat, MediaRequestParameters, MediaThumbnailSettings}, + }; + use matrix_sdk_test::{async_test, DEFAULT_TEST_ROOM_ID}; + use ruma::{events::room::MediaSource, media::Method, mxc_uri, room_id, uint}; + use uuid::Uuid; + + use super::IndexeddbEventCacheStore; + + wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); + + async fn get_event_cache_store() -> Result { + let db_name = format!("test-event-cache-store-{}", 
Uuid::new_v4().as_hyphenated()); + Ok(IndexeddbEventCacheStore::builder().name(db_name).build().await?) + } + + event_cache_store_integration_tests!(); + // event_cache_store_integration_tests_time!(); + // event_cache_store_media_integration_tests!(with_media_size_tests); +}