diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5fbe6964e6d..2de47b34852 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -304,7 +304,7 @@ jobs: uses: actions/checkout@v4 - name: Check the spelling of the files in our repo - uses: crate-ci/typos@v1.26.8 + uses: crate-ci/typos@v1.27.0 clippy: name: Run clippy diff --git a/Cargo.lock b/Cargo.lock index be9c9a99775..30db5f2af15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2898,6 +2898,7 @@ dependencies = [ "futures-executor", "futures-util", "gloo-timers", + "growable-bloom-filter", "http", "imbl", "indexmap 2.6.0", @@ -4574,8 +4575,9 @@ dependencies = [ [[package]] name = "ruma" -version = "0.10.1" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94984418ae8a5e1160e6c87608141330e9ae26330abf22e3d15416efa96d48a" dependencies = [ "assign", "js_int", @@ -4591,8 +4593,9 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.18.0" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "325e054db8d5545c00767d9868356d61e63f2c6cb8b54768346d66696ea4ad48" dependencies = [ "as_variant", "assign", @@ -4614,8 +4617,9 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad71c7f49abaa047ba228339d34f9aaefa4d8b50ebeb8e859d0340cc2138bda8" dependencies = [ "as_variant", "base64 0.22.1", @@ -4646,8 +4650,9 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.28.1" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be86dccf3504588c1f4dc1bda4ce1f8bbd646fc6dda40c77cc7de6e203e62dad" dependencies = [ "as_variant", "indexmap 2.6.0", @@ -4671,8 +4676,9 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5a09ac22b3352bf7a350514dc9a87e1b56aba04c326ac9ce142740f7218afa" dependencies = [ "http", "js_int", @@ -4685,8 +4691,9 @@ dependencies = [ [[package]] name = "ruma-html" -version = "0.2.0" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7571886b6df90a4ed72e7481a5a39cc2a5b3a4e956e9366ad798e4e2e9fe8005" dependencies = [ "as_variant", "html5ever", @@ -4697,8 +4704,9 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.9.5" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2e7f9b534a65698d7db3c08d94bf91de0046fe6c7893a7b360502f65e7011ac4" dependencies = [ "js_int", "thiserror", @@ -4706,8 +4714,9 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8d57d3cb20e8e758e8f7c5e408ce831d46758003b615100099852e468631934" dependencies = [ "cfg-if", "once_cell", @@ -4722,8 +4731,9 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=26165b23fc2ae9928c5497a21db3d31f4b44cc2a#26165b23fc2ae9928c5497a21db3d31f4b44cc2a" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfced466fbed6277f74ac3887eeb96c185a09f4323dc3c39bcea04870430fe9a" dependencies = [ "js_int", "ruma-common", diff --git a/Cargo.toml b/Cargo.toml index d648226e1fc..e05f04ac6b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,7 +45,7 @@ once_cell = "1.16.0" pin-project-lite = "0.2.9" rand = "0.8.5" reqwest = { version = "0.12.4", default-features = false } -ruma = { git = "https://github.com/ruma/ruma", rev = "26165b23fc2ae9928c5497a21db3d31f4b44cc2a", features = [ +ruma = { version = "0.11.1", features = [ "client-api-c", "compat-upload-signatures", "compat-user-id", @@ -59,7 +59,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "26165b23fc2ae9928c5497a21d "unstable-msc4075", "unstable-msc4140", ] } -ruma-common = { git = "https://github.com/ruma/ruma", rev = "26165b23fc2ae9928c5497a21db3d31f4b44cc2a" } +ruma-common = "0.14.1" serde = "1.0.151" serde_html_form = "0.2.0" serde_json = "1.0.91" diff --git a/benchmarks/benches/room_bench.rs b/benchmarks/benches/room_bench.rs index 5b010b6ee31..2283cc955ab 100644 --- a/benchmarks/benches/room_bench.rs +++ b/benchmarks/benches/room_bench.rs @@ -171,8 +171,9 @@ pub fn load_pinned_events_benchmark(c: &mut Criterion) { ); let room = client.get_room(&room_id).expect("Room not found"); - assert!(!room.pinned_event_ids().is_empty()); - assert_eq!(room.pinned_event_ids().len(), PINNED_EVENTS_COUNT); + let pinned_event_ids = room.pinned_event_ids().unwrap_or_default(); + assert!(!pinned_event_ids.is_empty()); + assert_eq!(pinned_event_ids.len(), PINNED_EVENTS_COUNT); let count = PINNED_EVENTS_COUNT; let name = format!("{count} pinned events"); @@ -191,8 +192,9 @@ pub fn load_pinned_events_benchmark(c: &mut Criterion) { group.bench_function(BenchmarkId::new("load_pinned_events", name), |b| { b.to_async(&runtime).iter(|| async { - assert!(!room.pinned_event_ids().is_empty()); - assert_eq!(room.pinned_event_ids().len(), PINNED_EVENTS_COUNT); + let pinned_event_ids = room.pinned_event_ids().unwrap_or_default(); + assert!(!pinned_event_ids.is_empty()); + assert_eq!(pinned_event_ids.len(), PINNED_EVENTS_COUNT); // Reset cache so it always loads the events from the mocked endpoint client.event_cache().empty_immutable_cache().await; diff --git a/bindings/matrix-sdk-crypto-ffi/src/machine.rs b/bindings/matrix-sdk-crypto-ffi/src/machine.rs index da12075bfe4..6403379ac6c 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/machine.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/machine.rs @@ -42,7 +42,8 @@ use ruma::{ }, serde::Raw, to_device::DeviceIdOrAllDevices, - DeviceKeyAlgorithm, EventId, OwnedTransactionId, OwnedUserId, RoomId, UserId, + DeviceKeyAlgorithm, EventId, OneTimeKeyAlgorithm, OwnedTransactionId, 
OwnedUserId, RoomId, + UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{value::RawValue, Value}; @@ -528,11 +529,11 @@ impl OlmMachine { ) -> Result { let to_device: ToDevice = serde_json::from_str(&events)?; let device_changes: RumaDeviceLists = device_changes.into(); - let key_counts: BTreeMap = key_counts + let key_counts: BTreeMap = key_counts .into_iter() .map(|(k, v)| { ( - DeviceKeyAlgorithm::from(k), + OneTimeKeyAlgorithm::from(k), v.clamp(0, i32::MAX) .try_into() .expect("Couldn't convert key counts into an UInt"), @@ -540,8 +541,8 @@ impl OlmMachine { }) .collect(); - let unused_fallback_keys: Option> = - unused_fallback_keys.map(|u| u.into_iter().map(DeviceKeyAlgorithm::from).collect()); + let unused_fallback_keys: Option> = + unused_fallback_keys.map(|u| u.into_iter().map(OneTimeKeyAlgorithm::from).collect()); let (to_device_events, room_key_infos) = self.runtime.block_on( self.inner.receive_sync_changes(matrix_sdk_crypto::EncryptionSyncChanges { diff --git a/bindings/matrix-sdk-crypto-ffi/src/users.rs b/bindings/matrix-sdk-crypto-ffi/src/users.rs index dfffd9fe546..4f5f54ea85e 100644 --- a/bindings/matrix-sdk-crypto-ffi/src/users.rs +++ b/bindings/matrix-sdk-crypto-ffi/src/users.rs @@ -18,6 +18,8 @@ pub enum UserIdentity { user_signing_key: String, /// The public self-signing key of our identity. self_signing_key: String, + /// True if this identity was verified at some point but is not anymore. + has_verification_violation: bool, }, /// The user identity of other users. Other { @@ -27,6 +29,8 @@ pub enum UserIdentity { master_key: String, /// The public self-signing key of our identity. self_signing_key: String, + /// True if this identity was verified at some point but is not anymore. + has_verification_violation: bool, }, } @@ -44,6 +48,7 @@ impl UserIdentity { master_key: serde_json::to_string(&master)?, user_signing_key: serde_json::to_string(&user_signing)?, self_signing_key: serde_json::to_string(&self_signing)?, + has_verification_violation: i.has_verification_violation(), } } SdkUserIdentity::Other(i) => { @@ -54,6 +59,7 @@ impl UserIdentity { user_id: i.user_id().to_string(), master_key: serde_json::to_string(&master)?, self_signing_key: serde_json::to_string(&self_signing)?, + has_verification_violation: i.has_verification_violation(), } } }) diff --git a/bindings/matrix-sdk-ffi/src/client.rs b/bindings/matrix-sdk-ffi/src/client.rs index 5e928e571f5..ea37febad8f 100644 --- a/bindings/matrix-sdk-ffi/src/client.rs +++ b/bindings/matrix-sdk-ffi/src/client.rs @@ -1,7 +1,6 @@ use std::{ collections::HashMap, fmt::Debug, - mem::ManuallyDrop, path::Path, sync::{Arc, RwLock}, }; @@ -23,6 +22,7 @@ use matrix_sdk::{ }, OidcAuthorizationData, OidcSession, }, + reqwest::StatusCode, ruma::{ api::client::{ media::get_content_thumbnail::v3::Method, @@ -41,7 +41,7 @@ use matrix_sdk::{ EventEncryptionAlgorithm, RoomId, TransactionId, UInt, UserId, }, sliding_sync::Version as SdkSlidingSyncVersion, - AuthApi, AuthSession, Client as MatrixClient, SessionChange, SessionTokens, + AuthApi, AuthSession, Client as MatrixClient, HttpError, SessionChange, SessionTokens, }; use matrix_sdk_ui::notification_client::{ NotificationClient as MatrixNotificationClient, @@ -79,6 +79,7 @@ use crate::{ ruma::AuthData, sync_service::{SyncService, SyncServiceBuilder}, task_handle::TaskHandle, + utils::AsyncRuntimeDropped, ClientError, }; @@ -184,26 +185,12 @@ impl From for TransmissionProgress { #[derive(uniffi::Object)] pub struct Client { - pub(crate) inner: ManuallyDrop, + pub(crate) 
inner: AsyncRuntimeDropped, delegate: RwLock>>, session_verification_controller: Arc>>, } -impl Drop for Client { - fn drop(&mut self) { - // Dropping the inner OlmMachine must happen within a tokio context - // because deadpool drops sqlite connections in the DB pool on tokio's - // blocking threadpool to avoid blocking async worker threads. - let _guard = RUNTIME.enter(); - // SAFETY: self.inner is never used again, which is the only requirement - // for ManuallyDrop::drop to be used safely. - unsafe { - ManuallyDrop::drop(&mut self.inner); - } - } -} - impl Client { pub async fn new( sdk_client: MatrixClient, @@ -224,7 +211,7 @@ impl Client { }); let client = Client { - inner: ManuallyDrop::new(sdk_client), + inner: AsyncRuntimeDropped::new(sdk_client), delegate: RwLock::new(None), session_verification_controller, }; @@ -503,7 +490,7 @@ impl Client { Arc::new(TaskHandle::new(RUNTIME.spawn(async move { // Respawn tasks for rooms that had unsent events. At this point we've just // created the subscriber, so it'll be notified about errors. - q.respawn_tasks_for_rooms_with_unsent_events().await; + q.respawn_tasks_for_rooms_with_unsent_requests().await; loop { match subscriber.recv().await { @@ -606,6 +593,10 @@ impl Client { &self, action: Option, ) -> Result, ClientError> { + if !matches!(self.inner.auth_api(), Some(AuthApi::Oidc(..))) { + return Ok(None); + } + match self.inner.oidc().account_management_url(action.map(Into::into)).await { Ok(url) => Ok(url.map(|u| u.to_string())), Err(e) => { @@ -1037,10 +1028,21 @@ impl Client { pub async fn resolve_room_alias( &self, room_alias: String, - ) -> Result { + ) -> Result, ClientError> { let room_alias = RoomAliasId::parse(&room_alias)?; - let response = self.inner.resolve_room_alias(&room_alias).await?; - Ok(response.into()) + match self.inner.resolve_room_alias(&room_alias).await { + Ok(response) => Ok(Some(response.into())), + Err(HttpError::Reqwest(http_error)) => match http_error.status() { + Some(StatusCode::NOT_FOUND) => Ok(None), + _ => Err(http_error.into()), + }, + Err(error) => Err(error.into()), + } + } + + /// Checks if a room alias exists in the current homeserver. + pub async fn room_alias_exists(&self, room_alias: String) -> Result { + self.resolve_room_alias(room_alias).await.map(|ret| ret.is_some()) } /// Given a room id, get the preview of a room, to interact with it. @@ -1126,6 +1128,23 @@ impl Client { Ok(()) } + + /// Checks if a room alias is available in the current homeserver. + pub async fn is_room_alias_available(&self, alias: String) -> Result { + let alias = RoomAliasId::parse(alias)?; + match self.inner.resolve_room_alias(&alias).await { + // The room alias was resolved, so it's already in use. + Ok(_) => Ok(false), + Err(HttpError::Reqwest(error)) => { + match error.status() { + // The room alias wasn't found, so it's available. 
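                    // (Descriptive note, not an added line of this patch: the arm below
                    // mirrors `resolve_room_alias` above. In both methods an HTTP 404
                    // (M_NOT_FOUND) from the homeserver is not treated as a failure:
                    // `resolve_room_alias` maps it to `Ok(None)`, meaning no room is
                    // mapped to the alias, while `is_room_alias_available` maps it to
                    // `Ok(true)`, meaning the alias can still be claimed. Any other error
                    // is surfaced as a `ClientError`.)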
+ Some(StatusCode::NOT_FOUND) => Ok(true), + _ => Err(HttpError::Reqwest(error).into()), + } + } + Err(error) => Err(error.into()), + } + } } #[matrix_sdk_ffi_macros::export(callback_interface)] diff --git a/bindings/matrix-sdk-ffi/src/room_directory_search.rs b/bindings/matrix-sdk-ffi/src/room_directory_search.rs index 9d37b60d45e..73baceff5e5 100644 --- a/bindings/matrix-sdk-ffi/src/room_directory_search.rs +++ b/bindings/matrix-sdk-ffi/src/room_directory_search.rs @@ -18,6 +18,7 @@ use std::{fmt::Debug, sync::Arc}; use eyeball_im::VectorDiff; use futures_util::StreamExt; use matrix_sdk::room_directory_search::RoomDirectorySearch as SdkRoomDirectorySearch; +use ruma::ServerName; use tokio::sync::RwLock; use super::RUNTIME; @@ -68,6 +69,12 @@ impl From for RoomDescriptio } } +/// A helper for performing room searches in the room directory. +/// The way this is intended to be used is: +/// +/// 1. Register a callback using [`RoomDirectorySearch::results`]. +/// 2. Start the room search with [`RoomDirectorySearch::search`]. +/// 3. To get more results, use [`RoomDirectorySearch::next_page`]. #[derive(uniffi::Object)] pub struct RoomDirectorySearch { pub(crate) inner: RwLock, @@ -81,28 +88,49 @@ impl RoomDirectorySearch { #[matrix_sdk_ffi_macros::export] impl RoomDirectorySearch { + /// Asks the server for the next page of the current search. pub async fn next_page(&self) -> Result<(), ClientError> { let mut inner = self.inner.write().await; inner.next_page().await?; Ok(()) } - pub async fn search(&self, filter: Option, batch_size: u32) -> Result<(), ClientError> { + /// Starts a filtered search for the server. + /// + /// If the `filter` is not provided it will search for all the rooms. + /// You can specify a `batch_size` to control the number of rooms to fetch + /// per request. + /// + /// If the `via_server` is not provided it will search in the current + /// homeserver by default. + /// + /// This method will clear the current search results and start a new one. + pub async fn search( + &self, + filter: Option, + batch_size: u32, + via_server_name: Option, + ) -> Result<(), ClientError> { + let server = via_server_name.map(ServerName::parse).transpose()?; let mut inner = self.inner.write().await; - inner.search(filter, batch_size).await?; + inner.search(filter, batch_size, server).await?; Ok(()) } + /// Get the number of pages that have been loaded so far. pub async fn loaded_pages(&self) -> Result { let inner = self.inner.read().await; Ok(inner.loaded_pages() as u32) } + /// Get whether the search is at the last page. pub async fn is_at_last_page(&self) -> Result { let inner = self.inner.read().await; Ok(inner.is_at_last_page()) } + /// Registers a callback to receive new search results when starting a + /// search or getting new paginated results. 
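    /// Illustrative sketch, not part of this patch: the three-step flow from the
    /// struct-level docs (register a callback, start a search, paginate), minus
    /// the callback registration which is shown right below. It assumes `filter`
    /// and `via_server_name` are plain `Option<String>` parameters, as suggested
    /// by the signatures above.
    pub async fn example_load_first_pages(&self) -> Result<(), ClientError> {
        // Search the "matrix.org" room directory for rooms matching "rust",
        // fetching 20 rooms per request.
        self.search(Some("rust".to_owned()), 20, Some("matrix.org".to_owned())).await?;

        // Ask for more pages until three are loaded, stopping early if the
        // directory has nothing left to return.
        while self.loaded_pages().await? < 3 && !self.is_at_last_page().await? {
            self.next_page().await?;
        }

        Ok(())
    }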
pub async fn results( &self, listener: Box, diff --git a/bindings/matrix-sdk-ffi/src/room_info.rs b/bindings/matrix-sdk-ffi/src/room_info.rs index 3f04eed962b..96de331fd10 100644 --- a/bindings/matrix-sdk-ffi/src/room_info.rs +++ b/bindings/matrix-sdk-ffi/src/room_info.rs @@ -67,7 +67,8 @@ impl RoomInfo { for (id, level) in power_levels_map.iter() { user_power_levels.insert(id.to_string(), *level); } - let pinned_event_ids = room.pinned_event_ids().iter().map(|id| id.to_string()).collect(); + let pinned_event_ids = + room.pinned_event_ids().unwrap_or_default().iter().map(|id| id.to_string()).collect(); Ok(Self { id: room.room_id().to_string(), diff --git a/bindings/matrix-sdk-ffi/src/room_list.rs b/bindings/matrix-sdk-ffi/src/room_list.rs index 27bc20dcc34..412ee88cce4 100644 --- a/bindings/matrix-sdk-ffi/src/room_list.rs +++ b/bindings/matrix-sdk-ffi/src/room_list.rs @@ -1,12 +1,6 @@ #![allow(deprecated)] -use std::{ - fmt::Debug, - mem::{ManuallyDrop, MaybeUninit}, - ptr::addr_of_mut, - sync::Arc, - time::Duration, -}; +use std::{fmt::Debug, mem::MaybeUninit, ptr::addr_of_mut, sync::Arc, time::Duration}; use eyeball_im::VectorDiff; use futures_util::{pin_mut, StreamExt, TryFutureExt}; @@ -34,6 +28,7 @@ use crate::{ room_preview::RoomPreview, timeline::{EventTimelineItem, Timeline}, timeline_event_filter::TimelineEventTypeFilter, + utils::AsyncRuntimeDropped, TaskHandle, RUNTIME, }; @@ -188,7 +183,6 @@ impl RoomList { listener: Box, ) -> Arc { let this = self.clone(); - let client = self.room_list_service.inner.client(); let utd_hook = self.room_list_service.utd_hook.clone(); // The following code deserves a bit of explanation. @@ -236,10 +230,7 @@ impl RoomList { // borrowing `this`, which is going to live long enough since it will live as // long as `entries_stream` and `dynamic_entries_controller`. let (entries_stream, dynamic_entries_controller) = - this.inner.entries_with_dynamic_adapters( - page_size.try_into().unwrap(), - client.room_info_notable_update_receiver(), - ); + this.inner.entries_with_dynamic_adapters(page_size.try_into().unwrap()); // FFI dance to make those values consumable by foreign language, nothing fancy // here, that's the real code for this method. @@ -635,7 +626,7 @@ impl RoomListItem { let room_preview = client.get_room_preview(&room_or_alias_id, server_names).await?; - Ok(Arc::new(RoomPreview::new(ManuallyDrop::new(client), room_preview))) + Ok(Arc::new(RoomPreview::new(AsyncRuntimeDropped::new(client), room_preview))) } /// Build a full `Room` FFI object, filling its associated timeline. diff --git a/bindings/matrix-sdk-ffi/src/room_preview.rs b/bindings/matrix-sdk-ffi/src/room_preview.rs index 08d3b626573..505081fc526 100644 --- a/bindings/matrix-sdk-ffi/src/room_preview.rs +++ b/bindings/matrix-sdk-ffi/src/room_preview.rs @@ -1,33 +1,16 @@ -use std::mem::ManuallyDrop; - use anyhow::Context as _; -use async_compat::TOKIO1 as RUNTIME; use matrix_sdk::{room_preview::RoomPreview as SdkRoomPreview, Client}; use ruma::space::SpaceRoomJoinRule; use tracing::warn; -use crate::{client::JoinRule, error::ClientError, room::Membership}; +use crate::{client::JoinRule, error::ClientError, room::Membership, utils::AsyncRuntimeDropped}; /// A room preview for a room. It's intended to be used to represent rooms that /// aren't joined yet. 
 #[derive(uniffi::Object)]
 pub struct RoomPreview {
     inner: SdkRoomPreview,
-    client: ManuallyDrop<Client>,
-}
-
-impl Drop for RoomPreview {
-    fn drop(&mut self) {
-        // Dropping the inner OlmMachine must happen within a tokio context
-        // because deadpool drops sqlite connections in the DB pool on tokio's
-        // blocking threadpool to avoid blocking async worker threads.
-        let _guard = RUNTIME.enter();
-        // SAFETY: self.client is never used again, which is the only requirement
-        // for ManuallyDrop::drop to be used safely.
-        unsafe {
-            ManuallyDrop::drop(&mut self.client);
-        }
-    }
+    client: AsyncRuntimeDropped<Client>,
 }
 
 #[matrix_sdk_ffi_macros::export]
@@ -65,7 +48,7 @@ impl RoomPreview {
 }
 
 impl RoomPreview {
-    pub(crate) fn new(client: ManuallyDrop<Client>, inner: SdkRoomPreview) -> Self {
+    pub(crate) fn new(client: AsyncRuntimeDropped<Client>, inner: SdkRoomPreview) -> Self {
         Self { client, inner }
     }
 }
diff --git a/bindings/matrix-sdk-ffi/src/utils.rs b/bindings/matrix-sdk-ffi/src/utils.rs
index 06fe82fc468..8868573e344 100644
--- a/bindings/matrix-sdk-ffi/src/utils.rs
+++ b/bindings/matrix-sdk-ffi/src/utils.rs
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::{mem::ManuallyDrop, ops::Deref};
+
+use async_compat::TOKIO1 as RUNTIME;
 use ruma::UInt;
 use tracing::warn;
 
@@ -21,3 +24,45 @@ pub(crate) fn u64_to_uint(u: u64) -> UInt {
         UInt::MAX
     })
 }
+
+/// Tiny wrapper for data types that must be dropped in the context of an async
+/// runtime.
+///
+/// This is useful whenever such a data type may transitively call some
+/// runtime's `block_on` function in its `Drop` impl (since we lack async drop
+/// at the moment), as done in some `deadpool` drop impls.
+pub(crate) struct AsyncRuntimeDropped<T>(ManuallyDrop<T>);
+
+impl<T> AsyncRuntimeDropped<T> {
+    /// Create a new wrapper for this type that will be dropped under an async
+    /// runtime.
+    pub fn new(val: T) -> Self {
+        Self(ManuallyDrop::new(val))
+    }
+}
+
+impl<T> Drop for AsyncRuntimeDropped<T> {
+    fn drop(&mut self) {
+        let _guard = RUNTIME.enter();
+        // SAFETY: self.inner is never used again, which is the only requirement
+        // for ManuallyDrop::drop to be used safely.
+        unsafe {
+            ManuallyDrop::drop(&mut self.0);
+        }
+    }
+}
+
+// What is an `AsyncRuntimeDropped<T>`, if not a `T` in disguise?
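// A minimal sketch of how the wrapper above is meant to be used inside the
// bindings crate (illustration only, not part of this patch; `ExampleFfiObject`
// is a hypothetical name): a value whose `Drop` may call back into tokio is
// stored as `AsyncRuntimeDropped<T>` instead of `ManuallyDrop<T>` plus a
// hand-written `Drop` impl, as `Client` and `RoomPreview` now do.
pub(crate) struct ExampleFfiObject {
    inner: AsyncRuntimeDropped<matrix_sdk::Client>,
}

impl ExampleFfiObject {
    pub(crate) fn new(client: matrix_sdk::Client) -> Self {
        // Wrap once at construction; dropping `ExampleFfiObject` then enters the
        // tokio runtime before the inner `Client` (and its store pools) is dropped.
        Self { inner: AsyncRuntimeDropped::new(client) }
    }

    pub(crate) fn homeserver_url(&self) -> String {
        // The `Deref` impl right below makes the wrapper transparent for reads,
        // so methods of the wrapped type can be called directly.
        self.inner.homeserver().to_string()
    }
}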
+impl Deref for AsyncRuntimeDropped { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Clone for AsyncRuntimeDropped { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} diff --git a/cliff.toml b/cliff.toml index b81994cffeb..a3589a536c4 100644 --- a/cliff.toml +++ b/cliff.toml @@ -75,6 +75,8 @@ commit_parsers = [ { message = "^test", skip = true }, { message = "^ci", skip = true }, ] +# forbid parsers from skipping breaking changes +protect_breaking_commits = true # filter out the commits that are not matched by commit parsers filter_commits = true # glob pattern for matching git tags diff --git a/crates/matrix-sdk-base/src/client.rs b/crates/matrix-sdk-base/src/client.rs index 1033345a80b..36485c13e26 100644 --- a/crates/matrix-sdk-base/src/client.rs +++ b/crates/matrix-sdk-base/src/client.rs @@ -43,6 +43,7 @@ use ruma::{ api::client as api, events::{ ignored_user_list::IgnoredUserListEvent, + marked_unread::MarkedUnreadEventContent, push_rules::{PushRulesEvent, PushRulesEventContent}, room::{ member::{MembershipState, RoomMemberEventContent, SyncRoomMemberEvent}, @@ -93,19 +94,23 @@ use crate::{ pub struct BaseClient { /// Database pub(crate) store: Store, + /// The store used by the event cache. event_cache_store: Arc, + /// The store used for encryption. /// /// This field is only meant to be used for `OlmMachine` initialization. /// All operations on it happen inside the `OlmMachine`. #[cfg(feature = "e2e-encryption")] crypto_store: Arc, + /// The olm-machine that is created once the /// [`SessionMeta`][crate::session::SessionMeta] is set via /// [`BaseClient::set_session_meta`] #[cfg(feature = "e2e-encryption")] olm_machine: Arc>>, + /// Observable of when a user is ignored/unignored. pub(crate) ignore_user_list_changes: SharedObservable>, @@ -683,6 +688,25 @@ impl BaseClient { } } + // Helper to update the unread marker for stable and unstable prefixes. + fn on_unread_marker( + room_id: &RoomId, + content: &MarkedUnreadEventContent, + room_info: &mut RoomInfo, + room_info_notable_updates: &mut BTreeMap, + ) { + if room_info.base_info.is_marked_unread != content.unread { + // Notify the room list about a manual read marker change if the + // value's changed. + room_info_notable_updates + .entry(room_id.to_owned()) + .or_default() + .insert(RoomInfoNotableUpdateReasons::UNREAD_MARKER); + } + + room_info.base_info.is_marked_unread = content.unread; + } + // Handle new events. for raw_event in events { match raw_event.deserialize() { @@ -692,19 +716,24 @@ impl BaseClient { match event { AnyRoomAccountDataEvent::MarkedUnread(event) => { on_room_info(room_id, changes, self, |room_info| { - if room_info.base_info.is_marked_unread != event.content.unread { - // Notify the room list about a manual read marker change if the - // value's changed. 
- room_info_notable_updates - .entry(room_id.to_owned()) - .or_default() - .insert(RoomInfoNotableUpdateReasons::UNREAD_MARKER); - } - - room_info.base_info.is_marked_unread = event.content.unread; + on_unread_marker( + room_id, + &event.content, + room_info, + room_info_notable_updates, + ); + }); + } + AnyRoomAccountDataEvent::UnstableMarkedUnread(event) => { + on_room_info(room_id, changes, self, |room_info| { + on_unread_marker( + room_id, + &event.content.0, + room_info, + room_info_notable_updates, + ); }); } - AnyRoomAccountDataEvent::Tag(event) => { on_room_info(room_id, changes, self, |room_info| { room_info.base_info.handle_notable_tags(&event.content.tags); diff --git a/crates/matrix-sdk-base/src/event_cache_store/integration_tests.rs b/crates/matrix-sdk-base/src/event_cache_store/integration_tests.rs index 2a0cc30faf0..ccb92f05005 100644 --- a/crates/matrix-sdk-base/src/event_cache_store/integration_tests.rs +++ b/crates/matrix-sdk-base/src/event_cache_store/integration_tests.rs @@ -31,6 +31,9 @@ use crate::media::{MediaFormat, MediaRequest, MediaThumbnailSettings}; pub trait EventCacheStoreIntegrationTests { /// Test media content storage. async fn test_media_content(&self); + + /// Test replacing a MXID. + async fn test_replace_media_key(&self); } #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] @@ -139,6 +142,42 @@ impl EventCacheStoreIntegrationTests for DynEventCacheStore { "other media was removed" ); } + + async fn test_replace_media_key(&self) { + let uri = mxc_uri!("mxc://sendqueue.local/tr4n-s4ct-10n1-d"); + let req = + MediaRequest { source: MediaSource::Plain(uri.to_owned()), format: MediaFormat::File }; + + let content = "hello".as_bytes().to_owned(); + + // Media isn't present in the cache. + assert!(self.get_media_content(&req).await.unwrap().is_none(), "unexpected media found"); + + // Add the media. + self.add_media_content(&req, content.clone()).await.expect("adding media failed"); + + // Sanity-check: media is found after adding it. + assert_eq!(self.get_media_content(&req).await.unwrap().unwrap(), b"hello"); + + // Replacing a media request works. + let new_uri = mxc_uri!("mxc://matrix.org/tr4n-s4ct-10n1-d"); + let new_req = MediaRequest { + source: MediaSource::Plain(new_uri.to_owned()), + format: MediaFormat::File, + }; + self.replace_media_key(&req, &new_req) + .await + .expect("replacing the media request key failed"); + + // Finding with the previous request doesn't work anymore. + assert!( + self.get_media_content(&req).await.unwrap().is_none(), + "unexpected media found with the old key" + ); + + // Finding with the new request does work. + assert_eq!(self.get_media_content(&new_req).await.unwrap().unwrap(), b"hello"); + } } /// Macro building to allow your `EventCacheStore` implementation to run the @@ -184,6 +223,13 @@ macro_rules! 
event_cache_store_integration_tests { get_event_cache_store().await.unwrap().into_event_cache_store(); event_cache_store.test_media_content().await; } + + #[async_test] + async fn test_replace_media_key() { + let event_cache_store = + get_event_cache_store().await.unwrap().into_event_cache_store(); + event_cache_store.test_replace_media_key().await; + } } }; } diff --git a/crates/matrix-sdk-base/src/event_cache_store/memory_store.rs b/crates/matrix-sdk-base/src/event_cache_store/memory_store.rs index 381c52cbc19..15fdb23708e 100644 --- a/crates/matrix-sdk-base/src/event_cache_store/memory_store.rs +++ b/crates/matrix-sdk-base/src/event_cache_store/memory_store.rs @@ -60,18 +60,35 @@ impl EventCacheStore for MemoryStore { Ok(()) } + async fn replace_media_key( + &self, + from: &MediaRequest, + to: &MediaRequest, + ) -> Result<(), Self::Error> { + let expected_key = from.unique_key(); + + let mut medias = self.media.write().unwrap(); + if let Some((mxc, key, _)) = medias.iter_mut().find(|(_, key, _)| *key == expected_key) { + *mxc = to.uri().to_owned(); + *key = to.unique_key(); + } + + Ok(()) + } + async fn get_media_content(&self, request: &MediaRequest) -> Result>> { - let media = self.media.read().unwrap(); let expected_key = request.unique_key(); + let media = self.media.read().unwrap(); Ok(media.iter().find_map(|(_media_uri, media_key, media_content)| { (media_key == &expected_key).then(|| media_content.to_owned()) })) } async fn remove_media_content(&self, request: &MediaRequest) -> Result<()> { - let mut media = self.media.write().unwrap(); let expected_key = request.unique_key(); + + let mut media = self.media.write().unwrap(); let Some(index) = media .iter() .position(|(_media_uri, media_key, _media_content)| media_key == &expected_key) diff --git a/crates/matrix-sdk-base/src/event_cache_store/traits.rs b/crates/matrix-sdk-base/src/event_cache_store/traits.rs index 6c56166177b..08691b06729 100644 --- a/crates/matrix-sdk-base/src/event_cache_store/traits.rs +++ b/crates/matrix-sdk-base/src/event_cache_store/traits.rs @@ -42,6 +42,28 @@ pub trait EventCacheStore: AsyncTraitDeps { content: Vec, ) -> Result<(), Self::Error>; + /// Replaces the given media's content key with another one. + /// + /// This should be used whenever a temporary (local) MXID has been used, and + /// it must now be replaced with its actual remote counterpart (after + /// uploading some content, or creating an empty MXC URI). + /// + /// ⚠ No check is performed to ensure that the media formats are consistent, + /// i.e. it's possible to update with a thumbnail key a media that was + /// keyed as a file before. The caller is responsible of ensuring that + /// the replacement makes sense, according to their use case. + /// + /// # Arguments + /// + /// * `from` - The previous `MediaRequest` of the file. + /// + /// * `to` - The new `MediaRequest` of the file. + async fn replace_media_key( + &self, + from: &MediaRequest, + to: &MediaRequest, + ) -> Result<(), Self::Error>; + /// Get a media file's content out of the media store. 
/// /// # Arguments @@ -91,6 +113,14 @@ impl EventCacheStore for EraseEventCacheStoreError { self.0.add_media_content(request, content).await.map_err(Into::into) } + async fn replace_media_key( + &self, + from: &MediaRequest, + to: &MediaRequest, + ) -> Result<(), Self::Error> { + self.0.replace_media_key(from, to).await.map_err(Into::into) + } + async fn get_media_content( &self, request: &MediaRequest, diff --git a/crates/matrix-sdk-base/src/rooms/normal.rs b/crates/matrix-sdk-base/src/rooms/normal.rs index cfe1625b7af..7181d4d4824 100644 --- a/crates/matrix-sdk-base/src/rooms/normal.rs +++ b/crates/matrix-sdk-base/src/rooms/normal.rs @@ -1019,7 +1019,7 @@ impl Room { } /// Returns the current pinned event ids for this room. - pub fn pinned_event_ids(&self) -> Vec { + pub fn pinned_event_ids(&self) -> Option> { self.inner.read().pinned_event_ids() } } @@ -1596,8 +1596,8 @@ impl RoomInfo { } /// Returns the current pinned event ids for this room. - pub fn pinned_event_ids(&self) -> Vec { - self.base_info.pinned_events.clone().map(|c| c.pinned).unwrap_or_default() + pub fn pinned_event_ids(&self) -> Option> { + self.base_info.pinned_events.clone().map(|c| c.pinned) } /// Checks if an `EventId` is currently pinned. diff --git a/crates/matrix-sdk-base/src/sliding_sync/mod.rs b/crates/matrix-sdk-base/src/sliding_sync/mod.rs index f613d177b6e..9350492a098 100644 --- a/crates/matrix-sdk-base/src/sliding_sync/mod.rs +++ b/crates/matrix-sdk-base/src/sliding_sync/mod.rs @@ -2509,7 +2509,7 @@ mod tests { // The newly created room has no pinned event ids let room = client.get_room(room_id).unwrap(); let pinned_event_ids = room.pinned_event_ids(); - assert!(pinned_event_ids.is_empty()); + assert_matches!(pinned_event_ids, None); // Load new pinned event id let mut room_response = http::response::Room::new(); @@ -2522,7 +2522,7 @@ mod tests { let response = response_with_room(room_id, room_response); client.process_sliding_sync(&response, &(), true).await.expect("Failed to process sync"); - let pinned_event_ids = room.pinned_event_ids(); + let pinned_event_ids = room.pinned_event_ids().unwrap_or_default(); assert_eq!(pinned_event_ids.len(), 1); assert_eq!(pinned_event_ids[0], pinned_event_id); @@ -2536,7 +2536,7 @@ mod tests { )); let response = response_with_room(room_id, room_response); client.process_sliding_sync(&response, &(), true).await.expect("Failed to process sync"); - let pinned_event_ids = room.pinned_event_ids(); + let pinned_event_ids = room.pinned_event_ids().unwrap(); assert!(pinned_event_ids.is_empty()); } diff --git a/crates/matrix-sdk-base/src/store/integration_tests.rs b/crates/matrix-sdk-base/src/store/integration_tests.rs index eccebdc9190..131f919c2c4 100644 --- a/crates/matrix-sdk-base/src/store/integration_tests.rs +++ b/crates/matrix-sdk-base/src/store/integration_tests.rs @@ -33,13 +33,12 @@ use ruma::{ }; use serde_json::{json, value::Value as JsonValue}; -use super::{DependentQueuedEventKind, DynStateStore, ServerCapabilities}; +use super::{ + send_queue::SentRequestKey, DependentQueuedRequestKind, DynStateStore, ServerCapabilities, +}; use crate::{ deserialized_responses::MemberEvent, - store::{ - traits::ChildTransactionId, QueueWedgeError, Result, SerializableEventContent, - StateStoreExt, - }, + store::{ChildTransactionId, QueueWedgeError, Result, SerializableEventContent, StateStoreExt}, RoomInfo, RoomMemberships, RoomState, StateChanges, StateStoreDataKey, StateStoreDataValue, }; @@ -1205,7 +1204,7 @@ impl StateStoreIntegrationTests for DynStateStore { let room_id = 
room_id!("!test_send_queue:localhost"); // No queued event in store at first. - let events = self.load_send_queue_events(room_id).await.unwrap(); + let events = self.load_send_queue_requests(room_id).await.unwrap(); assert!(events.is_empty()); // Saving one thing should work. @@ -1213,16 +1212,16 @@ impl StateStoreIntegrationTests for DynStateStore { let event0 = SerializableEventContent::new(&RoomMessageEventContent::text_plain("msg0").into()) .unwrap(); - self.save_send_queue_event(room_id, txn0.clone(), event0).await.unwrap(); + self.save_send_queue_request(room_id, txn0.clone(), event0.into()).await.unwrap(); // Reading it will work. - let pending = self.load_send_queue_events(room_id).await.unwrap(); + let pending = self.load_send_queue_requests(room_id).await.unwrap(); assert_eq!(pending.len(), 1); { assert_eq!(pending[0].transaction_id, txn0); - let deserialized = pending[0].event.deserialize().unwrap(); + let deserialized = pending[0].as_event().unwrap().deserialize().unwrap(); assert_let!(AnyMessageLikeEventContent::RoomMessage(content) = deserialized); assert_eq!(content.body(), "msg0"); @@ -1237,11 +1236,11 @@ impl StateStoreIntegrationTests for DynStateStore { ) .unwrap(); - self.save_send_queue_event(room_id, txn, event).await.unwrap(); + self.save_send_queue_request(room_id, txn, event.into()).await.unwrap(); } // Reading all the events should work. - let pending = self.load_send_queue_events(room_id).await.unwrap(); + let pending = self.load_send_queue_requests(room_id).await.unwrap(); // All the events should be retrieved, in the same order. assert_eq!(pending.len(), 4); @@ -1249,7 +1248,7 @@ impl StateStoreIntegrationTests for DynStateStore { assert_eq!(pending[0].transaction_id, txn0); for i in 0..4 { - let deserialized = pending[i].event.deserialize().unwrap(); + let deserialized = pending[i].as_event().unwrap().deserialize().unwrap(); assert_let!(AnyMessageLikeEventContent::RoomMessage(content) = deserialized); assert_eq!(content.body(), format!("msg{i}")); assert!(!pending[i].is_wedged()); @@ -1257,7 +1256,7 @@ impl StateStoreIntegrationTests for DynStateStore { // Marking an event as wedged works. let txn2 = &pending[2].transaction_id; - self.update_send_queue_event_status( + self.update_send_queue_request_status( room_id, txn2, Some(QueueWedgeError::GenericApiError { msg: "Oops".to_owned() }), @@ -1266,7 +1265,7 @@ impl StateStoreIntegrationTests for DynStateStore { .unwrap(); // And it is reflected. - let pending = self.load_send_queue_events(room_id).await.unwrap(); + let pending = self.load_send_queue_requests(room_id).await.unwrap(); // All the events should be retrieved, in the same order. assert_eq!(pending.len(), 4); @@ -1287,16 +1286,16 @@ impl StateStoreIntegrationTests for DynStateStore { &RoomMessageEventContent::text_plain("wow that's a cool test").into(), ) .unwrap(); - self.update_send_queue_event(room_id, txn2, event0).await.unwrap(); + self.update_send_queue_request(room_id, txn2, event0.into()).await.unwrap(); // And it is reflected. 
- let pending = self.load_send_queue_events(room_id).await.unwrap(); + let pending = self.load_send_queue_requests(room_id).await.unwrap(); assert_eq!(pending.len(), 4); { assert_eq!(pending[2].transaction_id, *txn2); - let deserialized = pending[2].event.deserialize().unwrap(); + let deserialized = pending[2].as_event().unwrap().deserialize().unwrap(); assert_let!(AnyMessageLikeEventContent::RoomMessage(content) = deserialized); assert_eq!(content.body(), "wow that's a cool test"); @@ -1304,7 +1303,7 @@ impl StateStoreIntegrationTests for DynStateStore { for i in 0..4 { if i != 2 { - let deserialized = pending[i].event.deserialize().unwrap(); + let deserialized = pending[i].as_event().unwrap().deserialize().unwrap(); assert_let!(AnyMessageLikeEventContent::RoomMessage(content) = deserialized); assert_eq!(content.body(), format!("msg{i}")); @@ -1314,10 +1313,10 @@ impl StateStoreIntegrationTests for DynStateStore { } // Removing an event works. - self.remove_send_queue_event(room_id, &txn0).await.unwrap(); + self.remove_send_queue_request(room_id, &txn0).await.unwrap(); // And it is reflected. - let pending = self.load_send_queue_events(room_id).await.unwrap(); + let pending = self.load_send_queue_requests(room_id).await.unwrap(); assert_eq!(pending.len(), 3); assert_eq!(pending[1].transaction_id, *txn2); @@ -1335,7 +1334,7 @@ impl StateStoreIntegrationTests for DynStateStore { let event = SerializableEventContent::new(&RoomMessageEventContent::text_plain("room2").into()) .unwrap(); - self.save_send_queue_event(room_id2, txn.clone(), event).await.unwrap(); + self.save_send_queue_request(room_id2, txn.clone(), event.into()).await.unwrap(); } // Add and remove one event for room3. @@ -1345,14 +1344,14 @@ impl StateStoreIntegrationTests for DynStateStore { let event = SerializableEventContent::new(&RoomMessageEventContent::text_plain("room3").into()) .unwrap(); - self.save_send_queue_event(room_id3, txn.clone(), event).await.unwrap(); + self.save_send_queue_request(room_id3, txn.clone(), event.into()).await.unwrap(); - self.remove_send_queue_event(room_id3, &txn).await.unwrap(); + self.remove_send_queue_request(room_id3, &txn).await.unwrap(); } // Query all the rooms which have unsent events. Per the previous steps, // it should be room1 and room2, not room3. - let outstanding_rooms = self.load_rooms_with_unsent_events().await.unwrap(); + let outstanding_rooms = self.load_rooms_with_unsent_requests().await.unwrap(); assert_eq!(outstanding_rooms.len(), 2); assert!(outstanding_rooms.iter().any(|room| room == room_id)); assert!(outstanding_rooms.iter().any(|room| room == room_id2)); @@ -1366,53 +1365,59 @@ impl StateStoreIntegrationTests for DynStateStore { let event0 = SerializableEventContent::new(&RoomMessageEventContent::text_plain("hey").into()) .unwrap(); - self.save_send_queue_event(room_id, txn0.clone(), event0).await.unwrap(); + self.save_send_queue_request(room_id, txn0.clone(), event0.into()).await.unwrap(); // No dependents, to start with. - assert!(self.list_dependent_send_queue_events(room_id).await.unwrap().is_empty()); + assert!(self.load_dependent_queued_requests(room_id).await.unwrap().is_empty()); // Save a redaction for that event. let child_txn = ChildTransactionId::new(); - self.save_dependent_send_queue_event( + self.save_dependent_queued_request( room_id, &txn0, child_txn.clone(), - DependentQueuedEventKind::Redact, + DependentQueuedRequestKind::RedactEvent, ) .await .unwrap(); // It worked. 
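        // (Context for the update above: `update_dependent_queued_request` stores the
        // parent request's `SentRequestKey`, here the event id the server returned for
        // `txn0`, on every dependent request of that parent. Once `parent_key` is set,
        // the send queue can materialize the dependent request, e.g. turn the pending
        // redaction into an actual redaction of that event id.)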
- let dependents = self.list_dependent_send_queue_events(room_id).await.unwrap(); + let dependents = self.load_dependent_queued_requests(room_id).await.unwrap(); assert_eq!(dependents.len(), 1); assert_eq!(dependents[0].parent_transaction_id, txn0); assert_eq!(dependents[0].own_transaction_id, child_txn); - assert!(dependents[0].event_id.is_none()); - assert_matches!(dependents[0].kind, DependentQueuedEventKind::Redact); + assert!(dependents[0].parent_key.is_none()); + assert_matches!(dependents[0].kind, DependentQueuedRequestKind::RedactEvent); // Update the event id. let event_id = owned_event_id!("$1"); - let num_updated = - self.update_dependent_send_queue_event(room_id, &txn0, event_id.clone()).await.unwrap(); + let num_updated = self + .update_dependent_queued_request( + room_id, + &txn0, + SentRequestKey::Event(event_id.clone()), + ) + .await + .unwrap(); assert_eq!(num_updated, 1); // It worked. - let dependents = self.list_dependent_send_queue_events(room_id).await.unwrap(); + let dependents = self.load_dependent_queued_requests(room_id).await.unwrap(); assert_eq!(dependents.len(), 1); assert_eq!(dependents[0].parent_transaction_id, txn0); assert_eq!(dependents[0].own_transaction_id, child_txn); - assert_eq!(dependents[0].event_id.as_ref(), Some(&event_id)); - assert_matches!(dependents[0].kind, DependentQueuedEventKind::Redact); + assert_eq!(dependents[0].parent_key.as_ref(), Some(&SentRequestKey::Event(event_id))); + assert_matches!(dependents[0].kind, DependentQueuedRequestKind::RedactEvent); // Now remove it. let removed = self - .remove_dependent_send_queue_event(room_id, &dependents[0].own_transaction_id) + .remove_dependent_queued_request(room_id, &dependents[0].own_transaction_id) .await .unwrap(); assert!(removed); // It worked. - assert!(self.list_dependent_send_queue_events(room_id).await.unwrap().is_empty()); + assert!(self.load_dependent_queued_requests(room_id).await.unwrap().is_empty()); // Now, inserting a dependent event and removing the original send queue event // will NOT remove the dependent event. @@ -1420,23 +1425,23 @@ impl StateStoreIntegrationTests for DynStateStore { let event1 = SerializableEventContent::new(&RoomMessageEventContent::text_plain("hey2").into()) .unwrap(); - self.save_send_queue_event(room_id, txn1.clone(), event1).await.unwrap(); + self.save_send_queue_request(room_id, txn1.clone(), event1.into()).await.unwrap(); - self.save_dependent_send_queue_event( + self.save_dependent_queued_request( room_id, &txn0, ChildTransactionId::new(), - DependentQueuedEventKind::Redact, + DependentQueuedRequestKind::RedactEvent, ) .await .unwrap(); - assert_eq!(self.list_dependent_send_queue_events(room_id).await.unwrap().len(), 1); + assert_eq!(self.load_dependent_queued_requests(room_id).await.unwrap().len(), 1); - self.save_dependent_send_queue_event( + self.save_dependent_queued_request( room_id, &txn1, ChildTransactionId::new(), - DependentQueuedEventKind::Edit { + DependentQueuedRequestKind::EditEvent { new_content: SerializableEventContent::new( &RoomMessageEventContent::text_plain("edit").into(), ) @@ -1445,14 +1450,14 @@ impl StateStoreIntegrationTests for DynStateStore { ) .await .unwrap(); - assert_eq!(self.list_dependent_send_queue_events(room_id).await.unwrap().len(), 2); + assert_eq!(self.load_dependent_queued_requests(room_id).await.unwrap().len(), 2); // Remove event0 / txn0. 
- let removed = self.remove_send_queue_event(room_id, &txn0).await.unwrap(); + let removed = self.remove_send_queue_request(room_id, &txn0).await.unwrap(); assert!(removed); // This has removed none of the dependent events. - let dependents = self.list_dependent_send_queue_events(room_id).await.unwrap(); + let dependents = self.load_dependent_queued_requests(room_id).await.unwrap(); assert_eq!(dependents.len(), 2); } } diff --git a/crates/matrix-sdk-base/src/store/memory_store.rs b/crates/matrix-sdk-base/src/store/memory_store.rs index c5d8c01384a..2ba91d6f200 100644 --- a/crates/matrix-sdk-base/src/store/memory_store.rs +++ b/crates/matrix-sdk-base/src/store/memory_store.rs @@ -36,12 +36,10 @@ use ruma::{ use tracing::{debug, instrument, trace, warn}; use super::{ - traits::{ - ChildTransactionId, ComposerDraft, QueuedEvent, SerializableEventContent, - ServerCapabilities, - }, - DependentQueuedEvent, DependentQueuedEventKind, Result, RoomInfo, StateChanges, StateStore, - StoreError, + send_queue::{ChildTransactionId, QueuedRequest, SentRequestKey}, + traits::{ComposerDraft, ServerCapabilities}, + DependentQueuedRequest, DependentQueuedRequestKind, QueuedRequestKind, Result, RoomInfo, + StateChanges, StateStore, StoreError, }; use crate::{ deserialized_responses::RawAnySyncOrStrippedState, store::QueueWedgeError, @@ -90,8 +88,8 @@ pub struct MemoryStore { >, >, custom: StdRwLock, Vec>>, - send_queue_events: StdRwLock>>, - dependent_send_queue_events: StdRwLock>>, + send_queue_events: StdRwLock>>, + dependent_send_queue_events: StdRwLock>>, } impl MemoryStore { @@ -804,26 +802,26 @@ impl StateStore for MemoryStore { Ok(()) } - async fn save_send_queue_event( + async fn save_send_queue_request( &self, room_id: &RoomId, transaction_id: OwnedTransactionId, - event: SerializableEventContent, + kind: QueuedRequestKind, ) -> Result<(), Self::Error> { self.send_queue_events .write() .unwrap() .entry(room_id.to_owned()) .or_default() - .push(QueuedEvent { event, transaction_id, error: None }); + .push(QueuedRequest { kind, transaction_id, error: None }); Ok(()) } - async fn update_send_queue_event( + async fn update_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, - content: SerializableEventContent, + kind: QueuedRequestKind, ) -> Result { if let Some(entry) = self .send_queue_events @@ -834,7 +832,7 @@ impl StateStore for MemoryStore { .iter_mut() .find(|item| item.transaction_id == transaction_id) { - entry.event = content; + entry.kind = kind; entry.error = None; Ok(true) } else { @@ -842,7 +840,7 @@ impl StateStore for MemoryStore { } } - async fn remove_send_queue_event( + async fn remove_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, @@ -865,14 +863,14 @@ impl StateStore for MemoryStore { Ok(false) } - async fn load_send_queue_events( + async fn load_send_queue_requests( &self, room_id: &RoomId, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { Ok(self.send_queue_events.write().unwrap().entry(room_id.to_owned()).or_default().clone()) } - async fn update_send_queue_event_status( + async fn update_send_queue_request_status( &self, room_id: &RoomId, transaction_id: &TransactionId, @@ -892,45 +890,45 @@ impl StateStore for MemoryStore { Ok(()) } - async fn load_rooms_with_unsent_events(&self) -> Result, Self::Error> { + async fn load_rooms_with_unsent_requests(&self) -> Result, Self::Error> { Ok(self.send_queue_events.read().unwrap().keys().cloned().collect()) } - async fn save_dependent_send_queue_event( + async fn 
save_dependent_queued_request( &self, room: &RoomId, parent_transaction_id: &TransactionId, own_transaction_id: ChildTransactionId, - content: DependentQueuedEventKind, + content: DependentQueuedRequestKind, ) -> Result<(), Self::Error> { self.dependent_send_queue_events.write().unwrap().entry(room.to_owned()).or_default().push( - DependentQueuedEvent { + DependentQueuedRequest { kind: content, parent_transaction_id: parent_transaction_id.to_owned(), own_transaction_id, - event_id: None, + parent_key: None, }, ); Ok(()) } - async fn update_dependent_send_queue_event( + async fn update_dependent_queued_request( &self, room: &RoomId, parent_txn_id: &TransactionId, - event_id: OwnedEventId, + sent_parent_key: SentRequestKey, ) -> Result { let mut dependent_send_queue_events = self.dependent_send_queue_events.write().unwrap(); let dependents = dependent_send_queue_events.entry(room.to_owned()).or_default(); let mut num_updated = 0; for d in dependents.iter_mut().filter(|item| item.parent_transaction_id == parent_txn_id) { - d.event_id = Some(event_id.clone()); + d.parent_key = Some(sent_parent_key.clone()); num_updated += 1; } Ok(num_updated) } - async fn remove_dependent_send_queue_event( + async fn remove_dependent_queued_request( &self, room: &RoomId, txn_id: &ChildTransactionId, @@ -949,10 +947,10 @@ impl StateStore for MemoryStore { /// /// This returns absolutely all the dependent send queue events, whether /// they have an event id or not. - async fn list_dependent_send_queue_events( + async fn load_dependent_queued_requests( &self, room: &RoomId, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { Ok(self.dependent_send_queue_events.read().unwrap().get(room).cloned().unwrap_or_default()) } } diff --git a/crates/matrix-sdk-base/src/store/mod.rs b/crates/matrix-sdk-base/src/store/mod.rs index 6c79893923c..cd4b7c67cd1 100644 --- a/crates/matrix-sdk-base/src/store/mod.rs +++ b/crates/matrix-sdk-base/src/store/mod.rs @@ -68,16 +68,19 @@ use crate::{ pub(crate) mod ambiguity_map; mod memory_store; pub mod migration_helpers; +mod send_queue; #[cfg(any(test, feature = "testing"))] pub use self::integration_tests::StateStoreIntegrationTests; pub use self::{ memory_store::MemoryStore, + send_queue::{ + ChildTransactionId, DependentQueuedRequest, DependentQueuedRequestKind, QueueWedgeError, + QueuedRequest, QueuedRequestKind, SentRequestKey, SerializableEventContent, + }, traits::{ - ChildTransactionId, ComposerDraft, ComposerDraftType, DependentQueuedEvent, - DependentQueuedEventKind, DynStateStore, IntoStateStore, QueueWedgeError, QueuedEvent, - SerializableEventContent, ServerCapabilities, StateStore, StateStoreDataKey, - StateStoreDataValue, StateStoreExt, + ComposerDraft, ComposerDraftType, DynStateStore, IntoStateStore, ServerCapabilities, + StateStore, StateStoreDataKey, StateStoreDataValue, StateStoreExt, }, }; diff --git a/crates/matrix-sdk-base/src/store/send_queue.rs b/crates/matrix-sdk-base/src/store/send_queue.rs new file mode 100644 index 00000000000..7efe0f13633 --- /dev/null +++ b/crates/matrix-sdk-base/src/store/send_queue.rs @@ -0,0 +1,266 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! All data types related to the send queue.
+
+use std::{collections::BTreeMap, fmt, ops::Deref};
+
+use as_variant::as_variant;
+use ruma::{
+    events::{AnyMessageLikeEventContent, EventContent as _, RawExt as _},
+    serde::Raw,
+    OwnedDeviceId, OwnedEventId, OwnedTransactionId, OwnedUserId, TransactionId,
+};
+use serde::{Deserialize, Serialize};
+
+/// A thin wrapper to serialize an `AnyMessageLikeEventContent`.
+#[derive(Clone, Serialize, Deserialize)]
+pub struct SerializableEventContent {
+    event: Raw<AnyMessageLikeEventContent>,
+    event_type: String,
+}
+
+#[cfg(not(tarpaulin_include))]
+impl fmt::Debug for SerializableEventContent {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Don't include the event in the debug display.
+        f.debug_struct("SerializedEventContent")
+            .field("event_type", &self.event_type)
+            .finish_non_exhaustive()
+    }
+}
+
+impl SerializableEventContent {
+    /// Create a [`SerializableEventContent`] from a raw
+    /// [`AnyMessageLikeEventContent`] along with its type.
+    pub fn from_raw(event: Raw<AnyMessageLikeEventContent>, event_type: String) -> Self {
+        Self { event_type, event }
+    }
+
+    /// Create a [`SerializableEventContent`] from an
+    /// [`AnyMessageLikeEventContent`].
+    pub fn new(event: &AnyMessageLikeEventContent) -> Result<Self, serde_json::Error> {
+        Ok(Self::from_raw(Raw::new(event)?, event.event_type().to_string()))
+    }
+
+    /// Convert a [`SerializableEventContent`] back into an
+    /// [`AnyMessageLikeEventContent`].
+    pub fn deserialize(&self) -> Result<AnyMessageLikeEventContent, serde_json::Error> {
+        self.event.deserialize_with_type(self.event_type.clone().into())
+    }
+
+    /// Returns the raw event content along with its type.
+    ///
+    /// Useful for callers manipulating custom events.
+    pub fn raw(&self) -> (&Raw<AnyMessageLikeEventContent>, &str) {
+        (&self.event, &self.event_type)
+    }
+}
+
+/// The kind of a send queue request.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum QueuedRequestKind {
+    /// An event to be sent via the send queue.
+    Event {
+        /// The content of the message-like event we'd like to send.
+        content: SerializableEventContent,
+    },
+}
+
+impl From<SerializableEventContent> for QueuedRequestKind {
+    fn from(content: SerializableEventContent) -> Self {
+        Self::Event { content }
+    }
+}
+
+/// A request to be sent with a send queue.
+#[derive(Clone)]
+pub struct QueuedRequest {
+    /// The kind of queued request we're going to send.
+    pub kind: QueuedRequestKind,
+
+    /// Unique transaction id for the queued request, acting as a key.
+    pub transaction_id: OwnedTransactionId,
+
+    /// Error returned when the request couldn't be sent and is stuck in an
+    /// unrecoverable state.
+    ///
+    /// `None` if the request is in the queue, waiting to be sent.
+    pub error: Option<QueueWedgeError>,
+}
+
+impl QueuedRequest {
+    /// Returns `Some` if the queued request is about sending an event.
+    pub fn as_event(&self) -> Option<&SerializableEventContent> {
+        as_variant!(&self.kind, QueuedRequestKind::Event { content } => content)
+    }
+
+    /// True if the request couldn't be sent because of an unrecoverable API
+    /// error. See [`Self::error`] for more details on the reason.
+    pub fn is_wedged(&self) -> bool {
+        self.error.is_some()
+    }
+}
+
+/// Represents an unrecoverable error that prevented an event from being sent
+/// via the send queue.
+///
+/// It is a serializable representation of a client error; see the `From`
+/// implementation for more details. These errors cannot be retried
+/// automatically, but some manual action may make a retry possible; if not,
+/// the only solution is to delete the local event.
+#[derive(Clone, Debug, Serialize, Deserialize, thiserror::Error)]
+pub enum QueueWedgeError {
+    /// This error occurs when there are some insecure devices in the room, and
+    /// the current encryption setting prohibits sharing with them.
+    #[error("There are insecure devices in the room")]
+    InsecureDevices {
+        /// The insecure devices, as a map from user ID to device IDs.
+        user_device_map: BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
+    },
+
+    /// This error occurs when a previously verified user is no longer
+    /// verified, and the current encryption setting prohibits sharing with
+    /// them.
+    #[error("Some users that were previously verified are not anymore")]
+    IdentityViolations {
+        /// The users that are expected to be verified but are not.
+        users: Vec<OwnedUserId>,
+    },
+
+    /// It is required to set up cross-signing and properly verify the current
+    /// session before sending.
+    #[error("Own verification is required")]
+    CrossVerificationRequired,
+
+    /// Other errors.
+    #[error("Other unrecoverable error: {msg}")]
+    GenericApiError {
+        /// Description of the error.
+        msg: String,
+    },
+}
+
+/// The specific user intent that characterizes a
+/// [`DependentQueuedRequestKind`].
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum DependentQueuedRequestKind {
+    /// The event should be edited.
+    EditEvent {
+        /// The new event for the content.
+        new_content: SerializableEventContent,
+    },
+
+    /// The event should be redacted/aborted/removed.
+    RedactEvent,
+
+    /// The event should be reacted to, with the given key.
+    ReactEvent {
+        /// Key used for the reaction.
+        key: String,
+    },
+}
+
+/// A transaction id identifying a [`DependentQueuedRequest`] rather than its
+/// parent [`QueuedRequest`].
+///
+/// This thin wrapper adds some safety to some APIs, making it possible to
+/// distinguish between the parent's `TransactionId` and the dependent event's
+/// own `TransactionId`.
+#[repr(transparent)]
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct ChildTransactionId(OwnedTransactionId);
+
+impl ChildTransactionId {
+    /// Returns a new [`ChildTransactionId`].
+    #[allow(clippy::new_without_default)]
+    pub fn new() -> Self {
+        Self(TransactionId::new())
+    }
+}
+
+impl Deref for ChildTransactionId {
+    type Target = TransactionId;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl From<String> for ChildTransactionId {
+    fn from(val: String) -> Self {
+        Self(val.into())
+    }
+}
+
+impl From<ChildTransactionId> for OwnedTransactionId {
+    fn from(val: ChildTransactionId) -> Self {
+        val.0
+    }
+}
+
+/// A unique key (identifier) indicating that a transaction has been
+/// successfully sent to the server.
+///
+/// The owning child transactions can now be resolved.
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
+pub enum SentRequestKey {
+    /// The parent transaction returned an event when it succeeded.
+    Event(OwnedEventId),
+}
+
+impl SentRequestKey {
+    /// Converts the current parent key into an event id, if possible.
+    pub fn into_event_id(self) -> Option<OwnedEventId> {
+        as_variant!(self, Self::Event)
+    }
+}
+
+/// A request to be sent, depending on a [`QueuedRequest`] to be sent first.
+///
+/// Depending on whether the parent request has been sent or not, this will
+/// either update the local echo in the storage, or materialize a request to
+/// the homeserver that implements the user intent.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct DependentQueuedRequest {
+    /// Unique identifier for this dependent queued request.
+    ///
+    /// Useful for deletion.
+    pub own_transaction_id: ChildTransactionId,
+
+    /// The kind of user intent.
+    pub kind: DependentQueuedRequestKind,
+
+    /// Transaction id for the parent's local echo / used in the server request.
+    ///
+    /// Note: this is the transaction id used for the depended-on request, i.e.
+    /// the one that was originally sent and that's being modified with this
+    /// dependent request.
+    pub parent_transaction_id: OwnedTransactionId,
+
+    /// If the parent request has been sent, the parent's request identifier
+    /// returned by the server once the local echo has been sent out.
+    pub parent_key: Option<SentRequestKey>,
+}
+
+#[cfg(not(tarpaulin_include))]
+impl fmt::Debug for QueuedRequest {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // Hide the content from the debug log.
+        f.debug_struct("QueuedRequest")
+            .field("transaction_id", &self.transaction_id)
+            .field("is_wedged", &self.is_wedged())
+            .finish_non_exhaustive()
+    }
+}
diff --git a/crates/matrix-sdk-base/src/store/traits.rs b/crates/matrix-sdk-base/src/store/traits.rs
index 142b7584529..4dabd9eefac 100644
--- a/crates/matrix-sdk-base/src/store/traits.rs
+++ b/crates/matrix-sdk-base/src/store/traits.rs
@@ -16,7 +16,6 @@ use std::{
     borrow::Borrow,
     collections::{BTreeMap, BTreeSet},
     fmt,
-    ops::Deref,
     sync::Arc,
 };
 
@@ -29,20 +28,23 @@ use ruma::{
     events::{
         presence::PresenceEvent,
         receipt::{Receipt, ReceiptThread, ReceiptType},
-        AnyGlobalAccountDataEvent, AnyMessageLikeEventContent, AnyRoomAccountDataEvent,
-        EmptyStateKey, EventContent as _, GlobalAccountDataEvent, GlobalAccountDataEventContent,
-        GlobalAccountDataEventType, RawExt as _, RedactContent, RedactedStateEventContent,
-        RoomAccountDataEvent, RoomAccountDataEventContent, RoomAccountDataEventType,
-        StateEventType, StaticEventContent, StaticStateEventContent,
+        AnyGlobalAccountDataEvent, AnyRoomAccountDataEvent, EmptyStateKey, GlobalAccountDataEvent,
+        GlobalAccountDataEventContent, GlobalAccountDataEventType, RedactContent,
+        RedactedStateEventContent, RoomAccountDataEvent, RoomAccountDataEventContent,
+        RoomAccountDataEventType, StateEventType, StaticEventContent, StaticStateEventContent,
     },
     serde::Raw,
     time::SystemTime,
-    EventId, OwnedDeviceId, OwnedEventId, OwnedMxcUri, OwnedRoomId, OwnedTransactionId,
-    OwnedUserId, RoomId, TransactionId, UserId,
+    EventId, OwnedEventId, OwnedMxcUri, OwnedRoomId, OwnedTransactionId, OwnedUserId, RoomId,
+    TransactionId, UserId,
 };
 use serde::{Deserialize, Serialize};
 
-use super::{StateChanges, StoreError};
+use super::{
+    send_queue::SentRequestKey, ChildTransactionId, DependentQueuedRequest,
+    DependentQueuedRequestKind, QueueWedgeError, QueuedRequest, QueuedRequestKind, StateChanges,
+    StoreError,
+};
 use crate::{
     deserialized_responses::{RawAnySyncOrStrippedState, RawMemberEvent, RawSyncOrStrippedState},
     MinimalRoomMemberEvent, RoomInfo, RoomMemberships,
@@ -342,7 +344,7 @@ pub trait StateStore: AsyncTraitDeps {
     /// * `room_id` - The `RoomId` of the room to delete.
async fn remove_room(&self, room_id: &RoomId) -> Result<(), Self::Error>; - /// Save an event to be sent by a send queue later. + /// Save a request to be sent by a send queue later (e.g. sending an event). /// /// # Arguments /// @@ -351,98 +353,104 @@ pub trait StateStore: AsyncTraitDeps { /// (and its transaction). Note: this is expected to be randomly generated /// and thus unique. /// * `content` - Serializable event content to be sent. - async fn save_send_queue_event( + async fn save_send_queue_request( &self, room_id: &RoomId, transaction_id: OwnedTransactionId, - content: SerializableEventContent, + request: QueuedRequestKind, ) -> Result<(), Self::Error>; - /// Updates a send queue event with the given content, and resets its wedged - /// status to false. + /// Updates a send queue request with the given content, and resets its + /// error status. /// /// # Arguments /// /// * `room_id` - The `RoomId` of the send queue's room. - /// * `transaction_id` - The unique key identifying the event to be sent + /// * `transaction_id` - The unique key identifying the request to be sent /// (and its transaction). /// * `content` - Serializable event content to replace the original one. /// - /// Returns true if an event has been updated, or false otherwise. - async fn update_send_queue_event( + /// Returns true if a request has been updated, or false otherwise. + async fn update_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, - content: SerializableEventContent, + content: QueuedRequestKind, ) -> Result; - /// Remove an event previously inserted with [`Self::save_send_queue_event`] - /// from the database, based on its transaction id. + /// Remove a request previously inserted with + /// [`Self::save_send_queue_request`] from the database, based on its + /// transaction id. /// - /// Returns true if an event has been removed, or false otherwise. - async fn remove_send_queue_event( + /// Returns true if something has been removed, or false otherwise. + async fn remove_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, ) -> Result; - /// Loads all the send queue events for the given room. - async fn load_send_queue_events( + /// Loads all the send queue requests for the given room. + async fn load_send_queue_requests( &self, room_id: &RoomId, - ) -> Result, Self::Error>; + ) -> Result, Self::Error>; /// Updates the send queue error status (wedge) for a given send queue - /// event. - /// Set `error` to None if the problem has been resolved and the event was - /// finally sent. - async fn update_send_queue_event_status( + /// request. + async fn update_send_queue_request_status( &self, room_id: &RoomId, transaction_id: &TransactionId, error: Option, ) -> Result<(), Self::Error>; - /// Loads all the rooms which have any pending events in their send queue. - async fn load_rooms_with_unsent_events(&self) -> Result, Self::Error>; + /// Loads all the rooms which have any pending requests in their send queue. + async fn load_rooms_with_unsent_requests(&self) -> Result, Self::Error>; - /// Add a new entry to the list of dependent send queue event for an event. - async fn save_dependent_send_queue_event( + /// Add a new entry to the list of dependent send queue requests for a + /// parent request. 
+ async fn save_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, own_txn_id: ChildTransactionId, - content: DependentQueuedEventKind, + content: DependentQueuedRequestKind, ) -> Result<(), Self::Error>; - /// Update a set of dependent send queue events with an event id, - /// effectively marking them as ready. + /// Update a set of dependent send queue requests with a key identifying the + /// homeserver's response, effectively marking them as ready. + /// + /// ⚠ Beware! There's no verification applied that the parent key type is + /// compatible with the dependent event type. The invalid state may be + /// lazily filtered out in `load_dependent_queued_requests`. /// - /// Returns the number of updated events. - async fn update_dependent_send_queue_event( + /// Returns the number of updated requests. + async fn update_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, - event_id: OwnedEventId, + sent_parent_key: SentRequestKey, ) -> Result; - /// Remove a specific dependent send queue event by id. + /// Remove a specific dependent send queue request by id. /// - /// Returns true if the dependent send queue event has been indeed removed. - async fn remove_dependent_send_queue_event( + /// Returns true if the dependent send queue request has been indeed + /// removed. + async fn remove_dependent_queued_request( &self, room: &RoomId, own_txn_id: &ChildTransactionId, ) -> Result; - /// List all the dependent send queue events. + /// List all the dependent send queue requests. /// - /// This returns absolutely all the dependent send queue events, whether - /// they have an event id or not. They must be returned in insertion order. - async fn list_dependent_send_queue_events( + /// This returns absolutely all the dependent send queue requests, whether + /// they have a parent event id or not. As a contract for implementors, they + /// must be returned in insertion order. 
+ async fn load_dependent_queued_requests( &self, room: &RoomId, - ) -> Result, Self::Error>; + ) -> Result, Self::Error>; } #[repr(transparent)] @@ -628,93 +636,93 @@ impl StateStore for EraseStateStoreError { self.0.remove_room(room_id).await.map_err(Into::into) } - async fn save_send_queue_event( + async fn save_send_queue_request( &self, room_id: &RoomId, transaction_id: OwnedTransactionId, - content: SerializableEventContent, + content: QueuedRequestKind, ) -> Result<(), Self::Error> { - self.0.save_send_queue_event(room_id, transaction_id, content).await.map_err(Into::into) + self.0.save_send_queue_request(room_id, transaction_id, content).await.map_err(Into::into) } - async fn update_send_queue_event( + async fn update_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, - content: SerializableEventContent, + content: QueuedRequestKind, ) -> Result { - self.0.update_send_queue_event(room_id, transaction_id, content).await.map_err(Into::into) + self.0.update_send_queue_request(room_id, transaction_id, content).await.map_err(Into::into) } - async fn remove_send_queue_event( + async fn remove_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, ) -> Result { - self.0.remove_send_queue_event(room_id, transaction_id).await.map_err(Into::into) + self.0.remove_send_queue_request(room_id, transaction_id).await.map_err(Into::into) } - async fn load_send_queue_events( + async fn load_send_queue_requests( &self, room_id: &RoomId, - ) -> Result, Self::Error> { - self.0.load_send_queue_events(room_id).await.map_err(Into::into) + ) -> Result, Self::Error> { + self.0.load_send_queue_requests(room_id).await.map_err(Into::into) } - async fn update_send_queue_event_status( + async fn update_send_queue_request_status( &self, room_id: &RoomId, transaction_id: &TransactionId, error: Option, ) -> Result<(), Self::Error> { self.0 - .update_send_queue_event_status(room_id, transaction_id, error) + .update_send_queue_request_status(room_id, transaction_id, error) .await .map_err(Into::into) } - async fn load_rooms_with_unsent_events(&self) -> Result, Self::Error> { - self.0.load_rooms_with_unsent_events().await.map_err(Into::into) + async fn load_rooms_with_unsent_requests(&self) -> Result, Self::Error> { + self.0.load_rooms_with_unsent_requests().await.map_err(Into::into) } - async fn save_dependent_send_queue_event( + async fn save_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, own_txn_id: ChildTransactionId, - content: DependentQueuedEventKind, + content: DependentQueuedRequestKind, ) -> Result<(), Self::Error> { self.0 - .save_dependent_send_queue_event(room_id, parent_txn_id, own_txn_id, content) + .save_dependent_queued_request(room_id, parent_txn_id, own_txn_id, content) .await .map_err(Into::into) } - async fn update_dependent_send_queue_event( + async fn update_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, - event_id: OwnedEventId, + sent_parent_key: SentRequestKey, ) -> Result { self.0 - .update_dependent_send_queue_event(room_id, parent_txn_id, event_id) + .update_dependent_queued_request(room_id, parent_txn_id, sent_parent_key) .await .map_err(Into::into) } - async fn remove_dependent_send_queue_event( + async fn remove_dependent_queued_request( &self, room_id: &RoomId, own_txn_id: &ChildTransactionId, ) -> Result { - self.0.remove_dependent_send_queue_event(room_id, own_txn_id).await.map_err(Into::into) + self.0.remove_dependent_queued_request(room_id, 
own_txn_id).await.map_err(Into::into) } - async fn list_dependent_send_queue_events( + async fn load_dependent_queued_requests( &self, room_id: &RoomId, - ) -> Result, Self::Error> { - self.0.list_dependent_send_queue_events(room_id).await.map_err(Into::into) + ) -> Result, Self::Error> { + self.0.load_dependent_queued_requests(room_id).await.map_err(Into::into) } } @@ -1103,210 +1111,6 @@ impl StateStoreDataKey<'_> { pub const COMPOSER_DRAFT: &'static str = "composer_draft"; } -/// A thin wrapper to serialize a `AnyMessageLikeEventContent`. -#[derive(Clone, Serialize, Deserialize)] -pub struct SerializableEventContent { - event: Raw, - event_type: String, -} - -#[cfg(not(tarpaulin_include))] -impl fmt::Debug for SerializableEventContent { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Don't include the event in the debug display. - f.debug_struct("SerializedEventContent") - .field("event_type", &self.event_type) - .finish_non_exhaustive() - } -} - -impl SerializableEventContent { - /// Create a [`SerializableEventContent`] from a raw - /// [`AnyMessageLikeEventContent`] along with its type. - pub fn from_raw(event: Raw, event_type: String) -> Self { - Self { event_type, event } - } - - /// Create a [`SerializableEventContent`] from an - /// [`AnyMessageLikeEventContent`]. - pub fn new(event: &AnyMessageLikeEventContent) -> Result { - Ok(Self::from_raw(Raw::new(event)?, event.event_type().to_string())) - } - - /// Convert a [`SerializableEventContent`] back into a - /// [`AnyMessageLikeEventContent`]. - pub fn deserialize(&self) -> Result { - self.event.deserialize_with_type(self.event_type.clone().into()) - } - - /// Returns the raw event content along with its type. - /// - /// Useful for callers manipulating custom events. - pub fn raw(self) -> (Raw, String) { - (self.event, self.event_type) - } -} - -/// An event to be sent with a send queue. -#[derive(Clone)] -pub struct QueuedEvent { - /// The content of the message-like event we'd like to send. - pub event: SerializableEventContent, - - /// Unique transaction id for the queued event, acting as a key. - pub transaction_id: OwnedTransactionId, - - /// Set when the event couldn't be sent because of an unrecoverable API - /// error. `None` if the event is in queue for being sent. - pub error: Option, -} - -impl QueuedEvent { - /// True if the event couldn't be sent because of an unrecoverable API - /// error. See [`Self::error`] for more details on the reason. - pub fn is_wedged(&self) -> bool { - self.error.is_some() - } -} - -/// Represents a failed to send unrecoverable error of an event sent via the -/// send_queue. -/// -/// It is a serializable representation of a client error, see -/// `From` implementation for more details. These errors can not be -/// automatically retried, but yet some manual action can be taken before retry -/// sending. If not the only solution is to delete the local event. -#[derive(Clone, Debug, Serialize, Deserialize, thiserror::Error)] -pub enum QueueWedgeError { - /// This error occurs when there are some insecure devices in the room, and - /// the current encryption setting prohibits sharing with them. - #[error("There are insecure devices in the room")] - InsecureDevices { - /// The insecure devices as a Map of userID to deviceID. - user_device_map: BTreeMap>, - }, - - /// This error occurs when a previously verified user is not anymore, and - /// the current encryption setting prohibits sharing when it happens. 
- #[error("Some users that were previously verified are not anymore")] - IdentityViolations { - /// The users that are expected to be verified but are not. - users: Vec, - }, - - /// It is required to set up cross-signing and properly verify the current - /// session before sending. - #[error("Own verification is required")] - CrossVerificationRequired, - - /// Other errors. - #[error("Other unrecoverable error: {msg}")] - GenericApiError { - /// Description of the error. - msg: String, - }, -} - -/// The specific user intent that characterizes a [`DependentQueuedEvent`]. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum DependentQueuedEventKind { - /// The event should be edited. - Edit { - /// The new event for the content. - new_content: SerializableEventContent, - }, - - /// The event should be redacted/aborted/removed. - Redact, - - /// The event should be reacted to, with the given key. - React { - /// Key used for the reaction. - key: String, - }, -} - -/// A transaction id identifying a [`DependentQueuedEvent`] rather than its -/// parent [`QueuedEvent`]. -/// -/// This thin wrapper adds some safety to some APIs, making it possible to -/// distinguish between the parent's `TransactionId` and the dependent event's -/// own `TransactionId`. -#[repr(transparent)] -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(transparent)] -pub struct ChildTransactionId(OwnedTransactionId); - -impl ChildTransactionId { - /// Returns a new [`ChildTransactionId`]. - #[allow(clippy::new_without_default)] - pub fn new() -> Self { - Self(TransactionId::new()) - } -} - -impl Deref for ChildTransactionId { - type Target = TransactionId; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl From for ChildTransactionId { - fn from(val: String) -> Self { - Self(val.into()) - } -} - -impl From for OwnedTransactionId { - fn from(val: ChildTransactionId) -> Self { - val.0 - } -} - -/// An event to be sent, depending on a [`QueuedEvent`] to be sent first. -/// -/// Depending on whether the event has been sent or not, this will either update -/// the local echo in the storage, or send an event equivalent to the user -/// intent to the homeserver. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct DependentQueuedEvent { - /// Unique identifier for this dependent queued event. - /// - /// Useful for deletion. - pub own_transaction_id: ChildTransactionId, - - /// The kind of user intent. - pub kind: DependentQueuedEventKind, - - /// Transaction id for the parent's local echo / used in the server request. - /// - /// Note: this is the transaction id used for the depended-on event, i.e. - /// the one that was originally sent and that's being modified with this - /// dependent event. - pub parent_transaction_id: OwnedTransactionId, - - /// If the parent event has been sent, the parent's event identifier - /// returned by the server once the local echo has been sent out. - /// - /// Note: this is the event id used for the depended-on event after it's - /// been sent, not for a possible event that could have been sent - /// because of this [`DependentQueuedEvent`]. - pub event_id: Option, -} - -#[cfg(not(tarpaulin_include))] -impl fmt::Debug for QueuedEvent { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Hide the content from the debug log. 
- f.debug_struct("QueuedEvent") - .field("transaction_id", &self.transaction_id) - .field("is_wedged", &self.is_wedged()) - .finish_non_exhaustive() - } -} - #[cfg(test)] mod tests { use super::{now_timestamp_ms, ServerCapabilities}; diff --git a/crates/matrix-sdk-common/src/ring_buffer.rs b/crates/matrix-sdk-common/src/ring_buffer.rs index 8b3bbcff15a..89aed5bec62 100644 --- a/crates/matrix-sdk-common/src/ring_buffer.rs +++ b/crates/matrix-sdk-common/src/ring_buffer.rs @@ -14,7 +14,7 @@ use std::{ collections::{ - vec_deque::{Drain, Iter}, + vec_deque::{Drain, Iter, IterMut}, VecDeque, }, num::NonZeroUsize, @@ -88,6 +88,13 @@ impl RingBuffer { self.inner.iter() } + /// Returns a mutable iterator that provides elements in front-to-back + /// order, i.e. the same order you would get if you repeatedly called + /// pop(). + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + self.inner.iter_mut() + } + /// Returns an iterator that drains its items. pub fn drain(&mut self, range: R) -> Drain<'_, T> where @@ -221,6 +228,24 @@ mod tests { assert_eq!(ring_buffer.remove(10), None); } + #[test] + fn test_iter() { + let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(5).unwrap()); + + ring_buffer.push(1); + ring_buffer.push(2); + ring_buffer.push(3); + + let as_vec = ring_buffer.iter().copied().collect::>(); + assert_eq!(as_vec, [1, 2, 3]); + + let first_entry = ring_buffer.iter_mut().next().unwrap(); + *first_entry = 42; + + let as_vec = ring_buffer.iter().copied().collect::>(); + assert_eq!(as_vec, [42, 2, 3]); + } + #[test] fn test_drain() { let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(5).unwrap()); @@ -243,14 +268,14 @@ mod tests { } #[test] - fn clear_on_empty_buffer_is_a_noop() { + fn test_clear_on_empty_buffer_is_a_noop() { let mut ring_buffer: RingBuffer = RingBuffer::new(NonZeroUsize::new(3).unwrap()); ring_buffer.clear(); assert_eq!(ring_buffer.len(), 0); } #[test] - fn clear_removes_all_items() { + fn test_clear_removes_all_items() { // Given a RingBuffer that has been used let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(3).unwrap()); ring_buffer.push(4); @@ -270,7 +295,7 @@ mod tests { } #[test] - fn clear_does_not_affect_capacity() { + fn test_clear_does_not_affect_capacity() { // Given a RingBuffer that has been used let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(3).unwrap()); ring_buffer.push(4); @@ -288,7 +313,7 @@ mod tests { } #[test] - fn capacity_is_what_we_passed_to_new() { + fn test_capacity_is_what_we_passed_to_new() { // Given a RingBuffer let ring_buffer = RingBuffer::::new(NonZeroUsize::new(13).unwrap()); // When I ask for its capacity I get what I provided at the start @@ -296,7 +321,7 @@ mod tests { } #[test] - fn capacity_is_not_affected_by_overflowing() { + fn test_capacity_is_not_affected_by_overflowing() { // Given a RingBuffer that has been used let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(3).unwrap()); ring_buffer.push(4); @@ -318,7 +343,7 @@ mod tests { } #[test] - fn roundtrip_serialization() { + fn test_roundtrip_serialization() { // Given a RingBuffer let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(3).unwrap()); ring_buffer.push("1".to_owned()); @@ -338,7 +363,7 @@ mod tests { } #[test] - fn extending_an_empty_ringbuffer_adds_the_items() { + fn test_extending_an_empty_ringbuffer_adds_the_items() { // Given a RingBuffer let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(5).unwrap()); @@ -350,7 +375,7 @@ mod tests { } #[test] - fn extend_adds_items_to_the_end() { + fn test_extend_adds_items_to_the_end() { // 
Given a RingBuffer with something in it let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(5).unwrap()); ring_buffer.push("1".to_owned()); @@ -367,7 +392,7 @@ mod tests { } #[test] - fn extend_does_not_overflow_max_length() { + fn test_extend_does_not_overflow_max_length() { // Given a RingBuffer with something in it let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(5).unwrap()); ring_buffer.push("1".to_owned()); @@ -390,7 +415,7 @@ mod tests { } #[test] - fn extending_a_full_ringbuffer_preserves_max_length() { + fn test_extending_a_full_ringbuffer_preserves_max_length() { // Given a full RingBuffer with something in it let mut ring_buffer = RingBuffer::new(NonZeroUsize::new(2).unwrap()); ring_buffer.push("1".to_owned()); diff --git a/crates/matrix-sdk-crypto/src/backups/mod.rs b/crates/matrix-sdk-crypto/src/backups/mod.rs index a3fe22fc1d2..fc98dfa03d0 100644 --- a/crates/matrix-sdk-crypto/src/backups/mod.rs +++ b/crates/matrix-sdk-crypto/src/backups/mod.rs @@ -224,19 +224,19 @@ impl BackupMachine { if device_key_id.algorithm() == DeviceKeyAlgorithm::Ed25519 { // No need to check our own device here, we're doing that using // the check_own_device_signature(). - if device_key_id.device_id() == self.store.static_account().device_id { + if device_key_id.key_name() == self.store.static_account().device_id { continue; } let state = self .test_ed25519_device_signature( - device_key_id.device_id(), + device_key_id.key_name(), signatures, auth_data, ) .await?; - result.insert(device_key_id.device_id().to_owned(), state); + result.insert(device_key_id.key_name().to_owned(), state); // Abort the loop if we found a trusted and valid signature, // unless we should check all of them. diff --git a/crates/matrix-sdk-crypto/src/error.rs b/crates/matrix-sdk-crypto/src/error.rs index 4400da9d696..2e3348dce11 100644 --- a/crates/matrix-sdk-crypto/src/error.rs +++ b/crates/matrix-sdk-crypto/src/error.rs @@ -305,13 +305,6 @@ pub enum SessionCreationError { )] OneTimeKeyMissing(OwnedUserId, OwnedDeviceId), - /// The one-time key algorithm is unsupported. - #[error( - "Tried to create a new Olm session for {0} {1}, but the one-time \ - key algorithm is unsupported" - )] - OneTimeKeyUnknown(OwnedUserId, OwnedDeviceId), - /// Failed to verify the one-time key signatures. 
#[error( "Failed to verify the signature of a one-time key, key: {one_time_key:?}, \ diff --git a/crates/matrix-sdk-crypto/src/gossiping/machine.rs b/crates/matrix-sdk-crypto/src/gossiping/machine.rs index 22a4e4e9cdd..9fa0bd9fdaf 100644 --- a/crates/matrix-sdk-crypto/src/gossiping/machine.rs +++ b/crates/matrix-sdk-crypto/src/gossiping/machine.rs @@ -34,7 +34,7 @@ use ruma::{ events::secret::request::{ RequestAction, SecretName, ToDeviceSecretRequestEvent as SecretRequestEvent, }, - DeviceId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedTransactionId, OwnedUserId, RoomId, + DeviceId, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedTransactionId, OwnedUserId, RoomId, TransactionId, UserId, }; use tracing::{debug, field::debug, info, instrument, trace, warn, Span}; @@ -178,7 +178,7 @@ impl GossipMachine { .map(|(key, value)| { let device_map = value .iter() - .map(|d| (d.to_owned(), DeviceKeyAlgorithm::SignedCurve25519)) + .map(|d| (d.to_owned(), OneTimeKeyAlgorithm::SignedCurve25519)) .collect(); (key.to_owned(), device_map) diff --git a/crates/matrix-sdk-crypto/src/machine/mod.rs b/crates/matrix-sdk-crypto/src/machine/mod.rs index 3624f4aa2cb..9439a83dea4 100644 --- a/crates/matrix-sdk-crypto/src/machine/mod.rs +++ b/crates/matrix-sdk-crypto/src/machine/mod.rs @@ -44,7 +44,7 @@ use ruma::{ AnyToDeviceEvent, MessageLikeEventContent, }, serde::{JsonObject, Raw}, - DeviceId, DeviceKeyAlgorithm, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId, + DeviceId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedDeviceKeyId, OwnedTransactionId, OwnedUserId, RoomId, TransactionId, UInt, UserId, }; use serde_json::{value::to_raw_value, Value}; @@ -2557,9 +2557,9 @@ pub struct EncryptionSyncChanges<'a> { /// sync response. pub changed_devices: &'a DeviceLists, /// The number of one time keys, as returned in the sync response. - pub one_time_keys_counts: &'a BTreeMap, + pub one_time_keys_counts: &'a BTreeMap, /// An optional list of fallback keys. - pub unused_fallback_keys: Option<&'a [DeviceKeyAlgorithm]>, + pub unused_fallback_keys: Option<&'a [OneTimeKeyAlgorithm]>, /// A next-batch token obtained from a to-device sync query. pub next_batch_token: Option, } diff --git a/crates/matrix-sdk-crypto/src/machine/test_helpers.rs b/crates/matrix-sdk-crypto/src/machine/test_helpers.rs index c1ace327dea..684b509436a 100644 --- a/crates/matrix-sdk-crypto/src/machine/test_helpers.rs +++ b/crates/matrix-sdk-crypto/src/machine/test_helpers.rs @@ -21,13 +21,15 @@ use as_variant::as_variant; use matrix_sdk_test::{ruma_response_from_json, test_json}; use ruma::{ api::client::keys::{ - claim_keys, get_keys, get_keys::v3::Response as KeysQueryResponse, upload_keys, + claim_keys, + get_keys::{self, v3::Response as KeysQueryResponse}, + upload_keys, }, device_id, encryption::OneTimeKey, events::dummy::ToDeviceDummyEventContent, serde::Raw, - user_id, DeviceId, OwnedDeviceKeyId, TransactionId, UserId, + user_id, DeviceId, OwnedOneTimeKeyId, TransactionId, UserId, }; use serde_json::json; @@ -37,7 +39,7 @@ use crate::{ }; /// These keys need to be periodically uploaded to the server. 
-type OneTimeKeys = BTreeMap>; +type OneTimeKeys = BTreeMap>; fn alice_device_id() -> &'static DeviceId { device_id!("JLAFKJWSCS") @@ -178,7 +180,7 @@ pub async fn create_session( machine: &OlmMachine, user_id: &UserId, device_id: &DeviceId, - key_id: OwnedDeviceKeyId, + key_id: OwnedOneTimeKeyId, one_time_key: Raw, ) { let one_time_keys = BTreeMap::from([( diff --git a/crates/matrix-sdk-crypto/src/machine/tests/mod.rs b/crates/matrix-sdk-crypto/src/machine/tests/mod.rs index 21dabf2023a..0de4cbcc8ad 100644 --- a/crates/matrix-sdk-crypto/src/machine/tests/mod.rs +++ b/crates/matrix-sdk-crypto/src/machine/tests/mod.rs @@ -37,7 +37,7 @@ use ruma::{ room_id, serde::Raw, uint, user_id, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, - TransactionId, UserId, + OneTimeKeyAlgorithm, TransactionId, UserId, }; use serde_json::json; use vodozemac::{ @@ -174,7 +174,7 @@ async fn test_generate_one_time_keys() { .await .unwrap(); - response.one_time_key_counts.insert(DeviceKeyAlgorithm::SignedCurve25519, uint!(50)); + response.one_time_key_counts.insert(OneTimeKeyAlgorithm::SignedCurve25519, uint!(50)); machine.receive_keys_upload_response(&response).await.unwrap(); @@ -275,7 +275,7 @@ fn test_one_time_key_signing() { async fn test_keys_for_upload() { let machine = OlmMachine::new(user_id(), alice_device_id()).await; - let key_counts = BTreeMap::from([(DeviceKeyAlgorithm::SignedCurve25519, 49u8.into())]); + let key_counts = BTreeMap::from([(OneTimeKeyAlgorithm::SignedCurve25519, 49u8.into())]); machine .receive_sync_changes(EncryptionSyncChanges { to_device_events: Vec::new(), @@ -327,7 +327,7 @@ async fn test_keys_for_upload() { let mut response = keys_upload_response(); response.one_time_key_counts.insert( - DeviceKeyAlgorithm::SignedCurve25519, + OneTimeKeyAlgorithm::SignedCurve25519, account.max_one_time_keys().try_into().unwrap(), ); diff --git a/crates/matrix-sdk-crypto/src/olm/account.rs b/crates/matrix-sdk-crypto/src/olm/account.rs index 27fc53ab73d..b10340feeea 100644 --- a/crates/matrix-sdk-crypto/src/olm/account.rs +++ b/crates/matrix-sdk-crypto/src/olm/account.rs @@ -31,8 +31,9 @@ use ruma::{ }, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, - OwnedDeviceKeyId, OwnedUserId, RoomId, SecondsSinceUnixEpoch, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, + OneTimeKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedOneTimeKeyId, OwnedUserId, RoomId, + SecondsSinceUnixEpoch, UInt, UserId, }; use serde::{de::Error, Deserialize, Serialize}; use serde_json::{ @@ -403,7 +404,7 @@ impl fmt::Debug for Account { } } -pub type OneTimeKeys = BTreeMap>; +pub type OneTimeKeys = BTreeMap>; pub type FallbackKeys = OneTimeKeys; impl Account { @@ -521,10 +522,10 @@ impl Account { pub(crate) fn update_key_counts( &mut self, - one_time_key_counts: &BTreeMap, - unused_fallback_keys: Option<&[DeviceKeyAlgorithm]>, + one_time_key_counts: &BTreeMap, + unused_fallback_keys: Option<&[OneTimeKeyAlgorithm]>, ) { - if let Some(count) = one_time_key_counts.get(&DeviceKeyAlgorithm::SignedCurve25519) { + if let Some(count) = one_time_key_counts.get(&OneTimeKeyAlgorithm::SignedCurve25519) { let count: u64 = (*count).into(); let old_count = self.uploaded_key_count(); @@ -827,9 +828,7 @@ impl Account { /// Sign and prepare one-time keys to be uploaded. /// /// If no one-time keys need to be uploaded, returns an empty `BTreeMap`. 
- pub fn signed_one_time_keys( - &self, - ) -> BTreeMap> { + pub fn signed_one_time_keys(&self) -> OneTimeKeys { let one_time_keys = self.one_time_keys(); if one_time_keys.is_empty() { @@ -842,9 +841,7 @@ impl Account { /// Sign and prepare fallback keys to be uploaded. /// /// If no fallback keys need to be uploaded returns an empty BTreeMap. - pub fn signed_fallback_keys( - &self, - ) -> BTreeMap> { + pub fn signed_fallback_keys(&self) -> FallbackKeys { let fallback_key = self.fallback_key(); if fallback_key.is_empty() { @@ -858,15 +855,15 @@ impl Account { &self, keys: HashMap, fallback: bool, - ) -> BTreeMap> { + ) -> OneTimeKeys { let mut keys_map = BTreeMap::new(); for (key_id, key) in keys { let signed_key = self.sign_key(key, fallback); keys_map.insert( - DeviceKeyId::from_parts( - DeviceKeyAlgorithm::SignedCurve25519, + OneTimeKeyId::from_parts( + OneTimeKeyAlgorithm::SignedCurve25519, key_id.to_base64().as_str().into(), ), signed_key.into_raw(), @@ -949,7 +946,7 @@ impl Account { )] fn find_pre_key_bundle( device: &DeviceData, - key_map: &BTreeMap>, + key_map: &OneTimeKeys, ) -> Result { let mut keys = key_map.iter(); @@ -965,10 +962,6 @@ impl Account { let result = match first_key { OneTimeKey::SignedKey(key) => Ok(PrekeyBundle::Olm3DH { key }), - _ => Err(SessionCreationError::OneTimeKeyUnknown( - device.user_id().to_owned(), - device.device_id().into(), - )), }; trace!(?result, "Finished searching for a valid pre-key bundle"); @@ -994,7 +987,7 @@ impl Account { pub fn create_outbound_session( &self, device: &DeviceData, - key_map: &BTreeMap>, + key_map: &OneTimeKeys, our_device_keys: DeviceKeys, ) -> Result { let pre_key_bundle = Self::find_pre_key_bundle(device, key_map)?; @@ -1502,8 +1495,8 @@ mod tests { use anyhow::Result; use matrix_sdk_test::async_test; use ruma::{ - device_id, user_id, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, - UserId, + device_id, user_id, DeviceId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, + OneTimeKeyId, UserId, }; use serde_json::json; @@ -1532,12 +1525,12 @@ mod tests { let (_, second_one_time_keys, _) = account.keys_for_upload(); assert!(!second_one_time_keys.is_empty()); - let device_key_ids: BTreeSet<&DeviceKeyId> = + let one_time_key_ids: BTreeSet<&OneTimeKeyId> = one_time_keys.keys().map(Deref::deref).collect(); - let second_device_key_ids: BTreeSet<&DeviceKeyId> = + let second_one_time_key_ids: BTreeSet<&OneTimeKeyId> = second_one_time_keys.keys().map(Deref::deref).collect(); - assert_eq!(device_key_ids, second_device_key_ids); + assert_eq!(one_time_key_ids, second_one_time_key_ids); account.mark_keys_as_published(); account.update_uploaded_key_count(50); @@ -1552,10 +1545,10 @@ mod tests { let (_, fourth_one_time_keys, _) = account.keys_for_upload(); assert!(!fourth_one_time_keys.is_empty()); - let fourth_device_key_ids: BTreeSet<&DeviceKeyId> = + let fourth_one_time_key_ids: BTreeSet<&OneTimeKeyId> = fourth_one_time_keys.keys().map(Deref::deref).collect(); - assert_ne!(device_key_ids, fourth_device_key_ids); + assert_ne!(one_time_key_ids, fourth_one_time_key_ids); Ok(()) } @@ -1573,7 +1566,7 @@ mod tests { "We should not upload fallback keys until we know if the server supports them." ); - let one_time_keys = BTreeMap::from([(DeviceKeyAlgorithm::SignedCurve25519, 50u8.into())]); + let one_time_keys = BTreeMap::from([(OneTimeKeyAlgorithm::SignedCurve25519, 50u8.into())]); // A `None` here means that the server doesn't support fallback keys, no // fallback key gets uploaded. 
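Side note, not part of the patch: the hunks above migrate the one-time-key plumbing from `DeviceKeyAlgorithm` to the `OneTimeKeyAlgorithm` type introduced by the ruma upgrade. Below is a minimal sketch of the map shape callers now build (for instance for `EncryptionSyncChanges::one_time_keys_counts`); the helper name is hypothetical, while `OneTimeKeyAlgorithm`, `UInt` and `uint!` are the ruma items used in the diff.

```rust
use std::collections::BTreeMap;

use ruma::{uint, OneTimeKeyAlgorithm, UInt};

/// Hypothetical helper: build the one-time key counts map which, after this
/// change, is keyed by `OneTimeKeyAlgorithm` rather than `DeviceKeyAlgorithm`.
fn signed_curve25519_counts(count: UInt) -> BTreeMap<OneTimeKeyAlgorithm, UInt> {
    BTreeMap::from([(OneTimeKeyAlgorithm::SignedCurve25519, count)])
}

fn main() {
    let counts = signed_curve25519_counts(uint!(50));
    // The map is keyed by the new algorithm type.
    assert_eq!(counts.get(&OneTimeKeyAlgorithm::SignedCurve25519), Some(&uint!(50)));
}
```
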
diff --git a/crates/matrix-sdk-crypto/src/session_manager/group_sessions/mod.rs b/crates/matrix-sdk-crypto/src/session_manager/group_sessions/mod.rs index 06c4bef261f..1fc60ffd9c9 100644 --- a/crates/matrix-sdk-crypto/src/session_manager/group_sessions/mod.rs +++ b/crates/matrix-sdk-crypto/src/session_manager/group_sessions/mod.rs @@ -789,7 +789,7 @@ mod tests { events::room::history_visibility::HistoryVisibility, room_id, to_device::DeviceIdOrAllDevices, - user_id, DeviceId, DeviceKeyAlgorithm, TransactionId, UInt, UserId, + user_id, DeviceId, OneTimeKeyAlgorithm, TransactionId, UInt, UserId, }; use serde_json::{json, Value}; @@ -1347,7 +1347,7 @@ mod tests { let machine = OlmMachine::new(alice_id(), alice_device_id()).await; assert_let!(Ok(Some((txn_id, device_keys_request))) = machine.upload_device_keys().await); let device_keys_response = upload_keys::v3::Response::new(BTreeMap::from([( - DeviceKeyAlgorithm::SignedCurve25519, + OneTimeKeyAlgorithm::SignedCurve25519, UInt::new(device_keys_request.one_time_keys.len() as u64).unwrap(), )])); machine.mark_request_as_sent(&txn_id, &device_keys_response).await.unwrap(); diff --git a/crates/matrix-sdk-crypto/src/session_manager/sessions.rs b/crates/matrix-sdk-crypto/src/session_manager/sessions.rs index b6e7d850cb9..2ae2306c330 100644 --- a/crates/matrix-sdk-crypto/src/session_manager/sessions.rs +++ b/crates/matrix-sdk-crypto/src/session_manager/sessions.rs @@ -25,7 +25,7 @@ use ruma::{ }, assign, events::dummy::ToDeviceDummyEventContent, - DeviceId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedDeviceKeyId, OwnedServerName, + DeviceId, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedOneTimeKeyId, OwnedServerName, OwnedTransactionId, OwnedUserId, SecondsSinceUnixEpoch, ServerName, TransactionId, UserId, }; use tracing::{debug, error, info, instrument, warn}; @@ -261,7 +261,7 @@ impl SessionManager { missing_session_devices_by_user .entry(user_id.to_owned()) .or_default() - .insert(device_id, DeviceKeyAlgorithm::SignedCurve25519); + .insert(device_id, OneTimeKeyAlgorithm::SignedCurve25519); } } else { failed_devices_by_user @@ -279,7 +279,7 @@ impl SessionManager { missing_session_devices_by_user.entry(user.to_owned()).or_default().extend( device_ids .iter() - .map(|device_id| (device_id.clone(), DeviceKeyAlgorithm::SignedCurve25519)), + .map(|device_id| (device_id.clone(), OneTimeKeyAlgorithm::SignedCurve25519)), ); } @@ -346,7 +346,7 @@ impl SessionManager { failed_servers: &BTreeSet, one_time_keys: &BTreeMap< &OwnedUserId, - BTreeMap<&OwnedDeviceId, BTreeSet<&OwnedDeviceKeyId>>, + BTreeMap<&OwnedDeviceId, BTreeSet<&OwnedOneTimeKeyId>>, >, ) { // First check that the response is for the request we were expecting. diff --git a/crates/matrix-sdk-crypto/src/store/mod.rs b/crates/matrix-sdk-crypto/src/store/mod.rs index de19b08a4db..92fdc6f32ec 100644 --- a/crates/matrix-sdk-crypto/src/store/mod.rs +++ b/crates/matrix-sdk-crypto/src/store/mod.rs @@ -215,7 +215,7 @@ impl KeyQueryManager { Err(_) => { warn!( user_id = ?user, - "The user has a pending `/key/query` request which did \ + "The user has a pending `/keys/query` request which did \ not finish yet, some devices might be missing." 
); diff --git a/crates/matrix-sdk-crypto/src/types/one_time_keys.rs b/crates/matrix-sdk-crypto/src/types/one_time_keys.rs index bb6457b6577..54b0f5d7c7b 100644 --- a/crates/matrix-sdk-crypto/src/types/one_time_keys.rs +++ b/crates/matrix-sdk-crypto/src/types/one_time_keys.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; -use ruma::{serde::Raw, DeviceKeyAlgorithm}; +use ruma::{serde::Raw, OneTimeKeyAlgorithm}; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::{value::to_raw_value, Value}; use vodozemac::Curve25519PublicKey; @@ -103,27 +103,17 @@ impl SignedKey { pub enum OneTimeKey { /// A signed Curve25519 one-time key. SignedKey(SignedKey), - - /// An unsigned Curve25519 one-time key. - #[serde(serialize_with = "serialize_curve_key")] - Key(Curve25519PublicKey), } impl OneTimeKey { - /// Deserialize the [`OneTimeKey`] from a [`DeviceKeyAlgorithm`] and a Raw + /// Deserialize the [`OneTimeKey`] from a [`OneTimeKeyAlgorithm`] and a Raw /// JSON value. pub fn deserialize( - algorithm: DeviceKeyAlgorithm, + algorithm: OneTimeKeyAlgorithm, key: &Raw, ) -> Result { match algorithm { - DeviceKeyAlgorithm::Curve25519 => { - let key: String = key.deserialize_as()?; - Ok(OneTimeKey::Key( - Curve25519PublicKey::from_base64(&key).map_err(serde::de::Error::custom)?, - )) - } - DeviceKeyAlgorithm::SignedCurve25519 => { + OneTimeKeyAlgorithm::SignedCurve25519 => { let key: SignedKey = key.deserialize_as()?; Ok(OneTimeKey::SignedKey(key)) } @@ -137,7 +127,6 @@ impl OneTimeKey { pub fn fallback(&self) -> bool { match self { OneTimeKey::SignedKey(s) => s.fallback(), - OneTimeKey::Key(_) => false, } } } diff --git a/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs b/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs index 98b55daea22..5d86dcf61a6 100644 --- a/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs +++ b/crates/matrix-sdk-indexeddb/src/state_store/migrations.rs @@ -46,7 +46,7 @@ use super::{ }; use crate::IndexeddbStateStoreError; -const CURRENT_DB_VERSION: u32 = 11; +const CURRENT_DB_VERSION: u32 = 12; const CURRENT_META_DB_VERSION: u32 = 2; /// Sometimes Migrations can't proceed without having to drop existing @@ -235,6 +235,9 @@ pub async fn upgrade_inner_db( if old_version < 11 { db = migrate_to_v11(db).await?; } + if old_version < 12 { + db = migrate_to_v12(db).await?; + } } db.close(); @@ -771,6 +774,26 @@ async fn migrate_to_v11(db: IdbDatabase) -> Result { apply_migration(db, 11, migration).await } +/// The format of data serialized into the send queue and dependent send queue +/// tables have changed, clear both. +async fn migrate_to_v12(db: IdbDatabase) -> Result { + let store_keys = &[keys::DEPENDENT_SEND_QUEUE, keys::ROOM_SEND_QUEUE]; + let tx = db.transaction_on_multi_with_mode(store_keys, IdbTransactionMode::Readwrite)?; + + for store_name in store_keys { + let store = tx.object_store(store_name)?; + store.clear()?; + } + + tx.await.into_result()?; + + let name = db.name(); + db.close(); + + // Update the version of the database. + Ok(IdbDatabase::open_u32(&name, 12)?.await?) 
+} + #[cfg(all(test, target_arch = "wasm32"))] mod tests { wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser); diff --git a/crates/matrix-sdk-indexeddb/src/state_store/mod.rs b/crates/matrix-sdk-indexeddb/src/state_store/mod.rs index 193fd4c7bbf..39156c70a25 100644 --- a/crates/matrix-sdk-indexeddb/src/state_store/mod.rs +++ b/crates/matrix-sdk-indexeddb/src/state_store/mod.rs @@ -25,9 +25,9 @@ use indexed_db_futures::prelude::*; use matrix_sdk_base::{ deserialized_responses::RawAnySyncOrStrippedState, store::{ - ChildTransactionId, ComposerDraft, DependentQueuedEvent, DependentQueuedEventKind, - QueuedEvent, SerializableEventContent, ServerCapabilities, StateChanges, StateStore, - StoreError, + ChildTransactionId, ComposerDraft, DependentQueuedRequest, DependentQueuedRequestKind, + QueuedRequest, QueuedRequestKind, SentRequestKey, SerializableEventContent, + ServerCapabilities, StateChanges, StateStore, StoreError, }, MinimalRoomMemberEvent, RoomInfo, RoomMemberships, StateStoreDataKey, StateStoreDataValue, }; @@ -423,22 +423,44 @@ impl IndexeddbStateStore { } } -/// A superset of [`QueuedEvent`] that also contains the room id, since we want -/// to return them. +/// A superset of [`QueuedRequest`] that also contains the room id, since we +/// want to return them. #[derive(Serialize, Deserialize)] -struct PersistedQueuedEvent { +struct PersistedQueuedRequest { /// In which room is this event going to be sent. pub room_id: OwnedRoomId, - // All these fields are the same as in [`QueuedEvent`]. - event: SerializableEventContent, + // All these fields are the same as in [`QueuedRequest`]. + /// Kind. Optional because it might be missing from previous formats. + kind: Option, transaction_id: OwnedTransactionId, - // Deprecated (from old format), now replaced with error field. - // Kept here for migration + pub error: Option, + + // Migrated fields: keep these private, they're not used anymore elsewhere in the code base. + /// Deprecated (from old format), now replaced with error field. is_wedged: Option, - pub error: Option, + event: Option, +} + +impl PersistedQueuedRequest { + fn into_queued_request(self) -> Option { + let kind = + self.kind.or_else(|| self.event.map(|content| QueuedRequestKind::Event { content }))?; + + let error = match self.is_wedged { + Some(true) => { + // Migrate to a generic error. + Some(QueueWedgeError::GenericApiError { + msg: "local echo failed to send in a previous session".into(), + }) + } + _ => self.error, + }; + + Some(QueuedRequest { kind, transaction_id: self.transaction_id, error }) + } } // Small hack to have the following macro invocation act as the appropriate @@ -1302,11 +1324,11 @@ impl_state_store!({ self.get_user_ids_inner(room_id, memberships, false).await } - async fn save_send_queue_event( + async fn save_send_queue_request( &self, room_id: &RoomId, transaction_id: OwnedTransactionId, - content: SerializableEventContent, + kind: QueuedRequestKind, ) -> Result<()> { let encoded_key = self.encode_key(keys::ROOM_SEND_QUEUE, room_id); @@ -1316,23 +1338,25 @@ impl_state_store!({ let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; - // We store an encoded vector of the queued events, with their transaction ids. + // We store an encoded vector of the queued requests, with their transaction + // ids. // Reload the previous vector for this room, or create an empty one. 
let prev = obj.get(&encoded_key)?.await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), - |val| self.deserialize_value::>(&val), + |val| self.deserialize_value::>(&val), )?; - // Push the new event. - prev.push(PersistedQueuedEvent { + // Push the new request. + prev.push(PersistedQueuedRequest { room_id: room_id.to_owned(), - event: content, + kind: Some(kind), transaction_id, - is_wedged: None, error: None, + is_wedged: None, + event: None, }); // Save the new vector into db. @@ -1343,11 +1367,11 @@ impl_state_store!({ Ok(()) } - async fn update_send_queue_event( + async fn update_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, - content: SerializableEventContent, + kind: QueuedRequestKind, ) -> Result { let encoded_key = self.encode_key(keys::ROOM_SEND_QUEUE, room_id); @@ -1357,21 +1381,25 @@ impl_state_store!({ let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; - // We store an encoded vector of the queued events, with their transaction ids. + // We store an encoded vector of the queued requests, with their transaction + // ids. // Reload the previous vector for this room, or create an empty one. let prev = obj.get(&encoded_key)?.await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), - |val| self.deserialize_value::>(&val), + |val| self.deserialize_value::>(&val), )?; - // Modify the one event. + // Modify the one request. if let Some(entry) = prev.iter_mut().find(|entry| entry.transaction_id == transaction_id) { - entry.event = content; - entry.is_wedged = None; + entry.kind = Some(kind); + // Reset the error state. entry.error = None; + // Remove migrated fields. + entry.is_wedged = None; + entry.event = None; // Save the new vector into db. obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; @@ -1383,7 +1411,7 @@ impl_state_store!({ } } - async fn remove_send_queue_event( + async fn remove_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, @@ -1397,11 +1425,12 @@ impl_state_store!({ let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; - // We store an encoded vector of the queued events, with their transaction ids. + // We store an encoded vector of the queued requests, with their transaction + // ids. // Reload the previous vector for this room. if let Some(val) = obj.get(&encoded_key)?.await? { - let mut prev = self.deserialize_value::>(&val)?; + let mut prev = self.deserialize_value::>(&val)?; if let Some(pos) = prev.iter().position(|item| item.transaction_id == transaction_id) { prev.remove(pos); @@ -1419,10 +1448,11 @@ impl_state_store!({ Ok(false) } - async fn load_send_queue_events(&self, room_id: &RoomId) -> Result> { + async fn load_send_queue_requests(&self, room_id: &RoomId) -> Result> { let encoded_key = self.encode_key(keys::ROOM_SEND_QUEUE, room_id); - // We store an encoded vector of the queued events, with their transaction ids. + // We store an encoded vector of the queued requests, with their transaction + // ids. let prev = self .inner .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)? 
@@ -1432,28 +1462,13 @@ impl_state_store!({ let prev = prev.map_or_else( || Ok(Vec::new()), - |val| self.deserialize_value::>(&val), + |val| self.deserialize_value::>(&val), )?; - Ok(prev - .into_iter() - .map(|item| QueuedEvent { - event: item.event, - transaction_id: item.transaction_id, - error: match item.is_wedged { - Some(true) => { - // migrate a generic error - Some(QueueWedgeError::GenericApiError { - msg: "local echo failed to send in a previous session".into(), - }) - } - _ => item.error, - }, - }) - .collect()) + Ok(prev.into_iter().filter_map(PersistedQueuedRequest::into_queued_request).collect()) } - async fn update_send_queue_event_status( + async fn update_send_queue_request_status( &self, room_id: &RoomId, transaction_id: &TransactionId, @@ -1468,12 +1483,12 @@ impl_state_store!({ let obj = tx.object_store(keys::ROOM_SEND_QUEUE)?; if let Some(val) = obj.get(&encoded_key)?.await? { - let mut prev = self.deserialize_value::>(&val)?; - if let Some(queued_event) = + let mut prev = self.deserialize_value::>(&val)?; + if let Some(request) = prev.iter_mut().find(|item| item.transaction_id == transaction_id) { - queued_event.is_wedged = None; - queued_event.error = error; + request.is_wedged = None; + request.error = error; obj.put_key_val(&encoded_key, &self.serialize_value(&prev)?)?; } } @@ -1483,7 +1498,7 @@ impl_state_store!({ Ok(()) } - async fn load_rooms_with_unsent_events(&self) -> Result> { + async fn load_rooms_with_unsent_requests(&self) -> Result> { let tx = self .inner .transaction_on_one_with_mode(keys::ROOM_SEND_QUEUE, IdbTransactionMode::Readwrite)?; @@ -1494,8 +1509,8 @@ impl_state_store!({ .get_all()? .await? .into_iter() - .map(|item| self.deserialize_value::>(&item)) - .collect::>, _>>()? + .map(|item| self.deserialize_value::>(&item)) + .collect::>, _>>()? .into_iter() .flat_map(|vec| vec.into_iter().map(|item| item.room_id)) .collect::>(); @@ -1503,12 +1518,12 @@ impl_state_store!({ Ok(all_entries.into_iter().collect()) } - async fn save_dependent_send_queue_event( + async fn save_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, own_txn_id: ChildTransactionId, - content: DependentQueuedEventKind, + content: DependentQueuedRequestKind, ) -> Result<()> { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); @@ -1519,21 +1534,21 @@ impl_state_store!({ let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; - // We store an encoded vector of the dependent events. + // We store an encoded vector of the dependent requests. // Reload the previous vector for this room, or create an empty one. let prev = obj.get(&encoded_key)?.await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), - |val| self.deserialize_value::>(&val), + |val| self.deserialize_value::>(&val), )?; - // Push the new event. - prev.push(DependentQueuedEvent { + // Push the new request. + prev.push(DependentQueuedRequest { kind: content, parent_transaction_id: parent_txn_id.to_owned(), own_transaction_id: own_txn_id, - event_id: None, + parent_key: None, }); // Save the new vector into db. 
@@ -1544,11 +1559,11 @@ impl_state_store!({ Ok(()) } - async fn update_dependent_send_queue_event( + async fn update_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, - event_id: OwnedEventId, + parent_key: SentRequestKey, ) -> Result { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); @@ -1559,19 +1574,19 @@ impl_state_store!({ let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; - // We store an encoded vector of the dependent events. + // We store an encoded vector of the dependent requests. // Reload the previous vector for this room, or create an empty one. let prev = obj.get(&encoded_key)?.await?; let mut prev = prev.map_or_else( || Ok(Vec::new()), - |val| self.deserialize_value::>(&val), + |val| self.deserialize_value::>(&val), )?; - // Modify all events that match. + // Modify all requests that match. let mut num_updated = 0; for entry in prev.iter_mut().filter(|entry| entry.parent_transaction_id == parent_txn_id) { - entry.event_id = Some(event_id.clone()); + entry.parent_key = Some(parent_key.clone()); num_updated += 1; } @@ -1583,7 +1598,7 @@ impl_state_store!({ Ok(num_updated) } - async fn remove_dependent_send_queue_event( + async fn remove_dependent_queued_request( &self, room_id: &RoomId, txn_id: &ChildTransactionId, @@ -1597,10 +1612,10 @@ impl_state_store!({ let obj = tx.object_store(keys::DEPENDENT_SEND_QUEUE)?; - // We store an encoded vector of the dependent events. + // We store an encoded vector of the dependent requests. // Reload the previous vector for this room. if let Some(val) = obj.get(&encoded_key)?.await? { - let mut prev = self.deserialize_value::>(&val)?; + let mut prev = self.deserialize_value::>(&val)?; if let Some(pos) = prev.iter().position(|item| item.own_transaction_id == *txn_id) { prev.remove(pos); @@ -1618,13 +1633,13 @@ impl_state_store!({ Ok(false) } - async fn list_dependent_send_queue_events( + async fn load_dependent_queued_requests( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result> { let encoded_key = self.encode_key(keys::DEPENDENT_SEND_QUEUE, room_id); - // We store an encoded vector of the dependent events. + // We store an encoded vector of the dependent requests. let prev = self .inner .transaction_on_one_with_mode( @@ -1637,7 +1652,7 @@ impl_state_store!({ prev.map_or_else( || Ok(Vec::new()), - |val| self.deserialize_value::>(&val), + |val| self.deserialize_value::>(&val), ) } }); @@ -1663,24 +1678,21 @@ impl From<&StrippedRoomMemberEvent> for RoomMember { #[cfg(test)] mod migration_tests { - use matrix_sdk_base::store::SerializableEventContent; + use assert_matches2::assert_matches; + use matrix_sdk_base::store::{QueuedRequestKind, SerializableEventContent}; use ruma::{ events::room::message::RoomMessageEventContent, room_id, OwnedRoomId, OwnedTransactionId, TransactionId, }; use serde::{Deserialize, Serialize}; - use crate::state_store::PersistedQueuedEvent; + use crate::state_store::PersistedQueuedRequest; #[derive(Serialize, Deserialize)] - struct OldPersistedQueuedEvent { - /// In which room is this event going to be sent. - pub room_id: OwnedRoomId, - - // All these fields are the same as in [`QueuedEvent`]. 
+ struct OldPersistedQueuedRequest { + room_id: OwnedRoomId, event: SerializableEventContent, transaction_id: OwnedTransactionId, - is_wedged: bool, } @@ -1695,21 +1707,29 @@ mod migration_tests { SerializableEventContent::new(&RoomMessageEventContent::text_plain("Hello").into()) .unwrap(); - let old_persisted_queue_event = OldPersistedQueuedEvent { + let old_persisted_queue_event = OldPersistedQueuedRequest { room_id: room_a_id.to_owned(), event: content, - transaction_id, + transaction_id: transaction_id.clone(), is_wedged: true, }; - let serialized_persisted_event = serde_json::to_vec(&old_persisted_queue_event).unwrap(); + let serialized_persisted = serde_json::to_vec(&old_persisted_queue_event).unwrap(); // Load it with the new version. - let new_persisted: PersistedQueuedEvent = - serde_json::from_slice(&serialized_persisted_event).unwrap(); + let new_persisted: PersistedQueuedRequest = + serde_json::from_slice(&serialized_persisted).unwrap(); assert_eq!(new_persisted.is_wedged, Some(true)); assert!(new_persisted.error.is_none()); + + assert!(new_persisted.event.is_some()); + assert!(new_persisted.kind.is_none()); + + let queued = new_persisted.into_queued_request().unwrap(); + assert_matches!(queued.kind, QueuedRequestKind::Event { .. }); + assert_eq!(queued.transaction_id, transaction_id); + assert!(queued.error.is_some()); } } diff --git a/crates/matrix-sdk-sqlite/migrations/state_store/005_send_queue_dependent_events.sql b/crates/matrix-sdk-sqlite/migrations/state_store/005_send_queue_dependent_events.sql index bb522d2d008..03e65c05748 100644 --- a/crates/matrix-sdk-sqlite/migrations/state_store/005_send_queue_dependent_events.sql +++ b/crates/matrix-sdk-sqlite/migrations/state_store/005_send_queue_dependent_events.sql @@ -14,6 +14,6 @@ CREATE TABLE "dependent_send_queue_events" ( -- Used as a value (thus encrypted/decrypted), can be null. "event_id" BLOB NULL, - -- Serialized `DependentQueuedEventKind`, used as a value (thus encrypted/decrypted). + -- Serialized `DependentQueuedRequestKind`, used as a value (thus encrypted/decrypted). "content" BLOB NOT NULL ); diff --git a/crates/matrix-sdk-sqlite/migrations/state_store/008_send_queue.sql b/crates/matrix-sdk-sqlite/migrations/state_store/008_send_queue.sql new file mode 100644 index 00000000000..f6afcbe54af --- /dev/null +++ b/crates/matrix-sdk-sqlite/migrations/state_store/008_send_queue.sql @@ -0,0 +1,10 @@ +-- Delete all previous entries in the dependent send queue table, because the format changed. +DELETE FROM "dependent_send_queue_events"; + +-- Rename its "event_id" column to "parent_key", while we're at it. +ALTER TABLE "dependent_send_queue_events" + RENAME COLUMN "event_id" + TO "parent_key"; + +--- Delete all previous entries in the send queue, since the content's format has changed. 
+DELETE FROM "send_queue_events"; diff --git a/crates/matrix-sdk-sqlite/src/event_cache_store.rs b/crates/matrix-sdk-sqlite/src/event_cache_store.rs index 51afb9e5deb..5a2bf2462e8 100644 --- a/crates/matrix-sdk-sqlite/src/event_cache_store.rs +++ b/crates/matrix-sdk-sqlite/src/event_cache_store.rs @@ -155,6 +155,28 @@ impl EventCacheStore for SqliteEventCacheStore { Ok(()) } + async fn replace_media_key( + &self, + from: &MediaRequest, + to: &MediaRequest, + ) -> Result<(), Self::Error> { + let prev_uri = self.encode_key(keys::MEDIA, from.source.unique_key()); + let prev_format = self.encode_key(keys::MEDIA, from.format.unique_key()); + + let new_uri = self.encode_key(keys::MEDIA, to.source.unique_key()); + let new_format = self.encode_key(keys::MEDIA, to.format.unique_key()); + + let conn = self.acquire().await?; + conn.execute( + r#"UPDATE media SET uri = ?, format = ?, last_access = CAST(strftime('%s') as INT) + WHERE uri = ? AND format = ?"#, + (new_uri, new_format, prev_uri, prev_format), + ) + .await?; + + Ok(()) + } + async fn get_media_content(&self, request: &MediaRequest) -> Result>> { let uri = self.encode_key(keys::MEDIA, request.source.unique_key()); let format = self.encode_key(keys::MEDIA, request.format.unique_key()); diff --git a/crates/matrix-sdk-sqlite/src/state_store.rs b/crates/matrix-sdk-sqlite/src/state_store.rs index 53cd1bd7a00..4f453577651 100644 --- a/crates/matrix-sdk-sqlite/src/state_store.rs +++ b/crates/matrix-sdk-sqlite/src/state_store.rs @@ -11,8 +11,9 @@ use deadpool_sqlite::{Object as SqliteAsyncConn, Pool as SqlitePool, Runtime}; use matrix_sdk_base::{ deserialized_responses::{RawAnySyncOrStrippedState, SyncOrStrippedState}, store::{ - migration_helpers::RoomInfoV1, ChildTransactionId, DependentQueuedEvent, - DependentQueuedEventKind, QueueWedgeError, QueuedEvent, SerializableEventContent, + migration_helpers::RoomInfoV1, ChildTransactionId, DependentQueuedRequest, + DependentQueuedRequestKind, QueueWedgeError, QueuedRequest, QueuedRequestKind, + SentRequestKey, }, MinimalRoomMemberEvent, RoomInfo, RoomMemberships, RoomState, StateChanges, StateStore, StateStoreDataKey, StateStoreDataValue, @@ -68,7 +69,7 @@ mod keys { /// This is used to figure whether the sqlite database requires a migration. /// Every new SQL migration should imply a bump of this number, and changes in /// the [`SqliteStateStore::run_migrations`] function.. -const DATABASE_VERSION: u8 = 8; +const DATABASE_VERSION: u8 = 9; /// A sqlite based cryptostore. #[derive(Clone)] @@ -296,6 +297,16 @@ impl SqliteStateStore { }) .await?; } + + if from < 9 && to >= 9 { + conn.with_transaction(move |txn| { + // Run the migration. 
+ txn.execute_batch(include_str!("../migrations/state_store/008_send_queue.sql"))?; + txn.set_db_version(9) + }) + .await?; + } + Ok(()) } @@ -1669,11 +1680,11 @@ impl StateStore for SqliteStateStore { .await } - async fn save_send_queue_event( + async fn save_send_queue_request( &self, room_id: &RoomId, transaction_id: OwnedTransactionId, - content: SerializableEventContent, + content: QueuedRequestKind, ) -> Result<(), Self::Error> { let room_id_key = self.encode_key(keys::SEND_QUEUE, room_id); let room_id_value = self.serialize_value(&room_id.to_owned())?; @@ -1694,11 +1705,11 @@ impl StateStore for SqliteStateStore { .await } - async fn update_send_queue_event( + async fn update_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, - content: SerializableEventContent, + content: QueuedRequestKind, ) -> Result { let room_id = self.encode_key(keys::SEND_QUEUE, room_id); @@ -1717,7 +1728,7 @@ impl StateStore for SqliteStateStore { Ok(num_updated > 0) } - async fn remove_send_queue_event( + async fn remove_send_queue_request( &self, room_id: &RoomId, transaction_id: &TransactionId, @@ -1741,10 +1752,10 @@ impl StateStore for SqliteStateStore { Ok(num_deleted > 0) } - async fn load_send_queue_events( + async fn load_send_queue_requests( &self, room_id: &RoomId, - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { let room_id = self.encode_key(keys::SEND_QUEUE, room_id); // Note: ROWID is always present and is an auto-incremented integer counter. We @@ -1763,19 +1774,19 @@ impl StateStore for SqliteStateStore { ) .await?; - let mut queued_events = Vec::with_capacity(res.len()); + let mut requests = Vec::with_capacity(res.len()); for entry in res { - queued_events.push(QueuedEvent { + requests.push(QueuedRequest { transaction_id: entry.0.into(), - event: self.deserialize_json(&entry.1)?, + kind: self.deserialize_json(&entry.1)?, error: entry.2.map(|v| self.deserialize_value(&v)).transpose()?, }); } - Ok(queued_events) + Ok(requests) } - async fn update_send_queue_event_status( + async fn update_send_queue_request_status( &self, room_id: &RoomId, transaction_id: &TransactionId, @@ -1798,7 +1809,7 @@ impl StateStore for SqliteStateStore { .await } - async fn load_rooms_with_unsent_events(&self) -> Result, Self::Error> { + async fn load_rooms_with_unsent_requests(&self) -> Result, Self::Error> { // If the values were not encrypted, we could use `SELECT DISTINCT` here, but we // have to manually do the deduplication: indeed, for all X, encrypt(X) // != encrypted(X), since we use a nonce in the encryption process. @@ -1821,12 +1832,12 @@ impl StateStore for SqliteStateStore { .collect()) } - async fn save_dependent_send_queue_event( + async fn save_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, own_txn_id: ChildTransactionId, - content: DependentQueuedEventKind, + content: DependentQueuedRequestKind, ) -> Result<()> { let room_id = self.encode_key(keys::DEPENDENTS_SEND_QUEUE, room_id); let content = self.serialize_json(&content)?; @@ -1849,14 +1860,14 @@ impl StateStore for SqliteStateStore { .await } - async fn update_dependent_send_queue_event( + async fn update_dependent_queued_request( &self, room_id: &RoomId, parent_txn_id: &TransactionId, - event_id: OwnedEventId, + parent_key: SentRequestKey, ) -> Result { let room_id = self.encode_key(keys::DEPENDENTS_SEND_QUEUE, room_id); - let event_id = self.serialize_value(&event_id)?; + let parent_key = self.serialize_value(&parent_key)?; // See comment in `save_send_queue_event`. 
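For reference, the new migration step pairs the `008_send_queue.sql` file with the `DATABASE_VERSION` bump to 9, run inside a single transaction. Below is a minimal rusqlite sketch of the same idea; it uses a synchronous connection, unencrypted values and a hand-made stand-in schema rather than the SDK's async pool and `with_transaction`/`set_db_version` helpers, so treat every name outside the migration SQL itself as illustrative.

```rust
use rusqlite::Connection;

/// Sketch of a v8 -> v9 style migration step: wipe the rows whose serialized
/// format changed, rename the column, and record the new schema version,
/// all inside one transaction.
fn migrate_send_queue_to_v9(conn: &mut Connection) -> rusqlite::Result<()> {
    let current: i64 = conn.query_row("PRAGMA user_version", [], |row| row.get(0))?;
    if current >= 9 {
        // Already migrated.
        return Ok(());
    }

    let tx = conn.transaction()?;
    tx.execute_batch(
        r#"
        DELETE FROM "dependent_send_queue_events";
        ALTER TABLE "dependent_send_queue_events" RENAME COLUMN "event_id" TO "parent_key";
        DELETE FROM "send_queue_events";
        PRAGMA user_version = 9;
        "#,
    )?;
    tx.commit()
}

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;
    // Simplified stand-in for the pre-existing schema; in the real store the
    // earlier migration files create these tables.
    conn.execute_batch(
        r#"
        CREATE TABLE "send_queue_events" ("transaction_id" TEXT, "content" BLOB);
        CREATE TABLE "dependent_send_queue_events" ("parent_transaction_id" TEXT, "event_id" BLOB, "content" BLOB);
        PRAGMA user_version = 8;
        "#,
    )?;
    migrate_send_queue_to_v9(&mut conn)
}
```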
let parent_txn_id = parent_txn_id.to_string(); @@ -1865,14 +1876,14 @@ impl StateStore for SqliteStateStore { .await? .with_transaction(move |txn| { Ok(txn.prepare_cached( - "UPDATE dependent_send_queue_events SET event_id = ? WHERE parent_transaction_id = ? and room_id = ?", + "UPDATE dependent_send_queue_events SET parent_key = ? WHERE parent_transaction_id = ? and room_id = ?", )? - .execute((event_id, parent_txn_id, room_id))?) + .execute((parent_key, parent_txn_id, room_id))?) }) .await } - async fn remove_dependent_send_queue_event( + async fn remove_dependent_queued_request( &self, room_id: &RoomId, txn_id: &ChildTransactionId, @@ -1896,10 +1907,10 @@ impl StateStore for SqliteStateStore { Ok(num_deleted > 0) } - async fn list_dependent_send_queue_events( + async fn load_dependent_queued_requests( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result> { let room_id = self.encode_key(keys::DEPENDENTS_SEND_QUEUE, room_id); // Note: transaction_id is not encoded, see why in `save_send_queue_event`. @@ -1907,7 +1918,7 @@ impl StateStore for SqliteStateStore { .acquire() .await? .prepare( - "SELECT own_transaction_id, parent_transaction_id, event_id, content FROM dependent_send_queue_events WHERE room_id = ? ORDER BY ROWID", + "SELECT own_transaction_id, parent_transaction_id, parent_key, content FROM dependent_send_queue_events WHERE room_id = ? ORDER BY ROWID", |mut stmt| { stmt.query((room_id,))? .mapped(|row| Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?))) @@ -1918,10 +1929,10 @@ impl StateStore for SqliteStateStore { let mut dependent_events = Vec::with_capacity(res.len()); for entry in res { - dependent_events.push(DependentQueuedEvent { + dependent_events.push(DependentQueuedRequest { own_transaction_id: entry.0.into(), parent_transaction_id: entry.1.into(), - event_id: entry.2.map(|bytes| self.deserialize_value(&bytes)).transpose()?, + parent_key: entry.2.map(|bytes| self.deserialize_value(&bytes)).transpose()?, kind: self.deserialize_json(&entry.3)?, }); } @@ -1999,9 +2010,8 @@ mod migration_tests { }, }; - use assert_matches::assert_matches; use matrix_sdk_base::{ - store::{QueueWedgeError, SerializableEventContent}, + store::{ChildTransactionId, DependentQueuedRequestKind, SerializableEventContent}, sync::UnreadNotificationsCount, RoomState, StateStore, }; @@ -2262,10 +2272,10 @@ mod migration_tests { } #[async_test] - pub async fn test_migrating_v7_to_v8() { + pub async fn test_migrating_v7_to_v9() { let path = new_path(); - let room_a_id = room_id!("!room_a:dummy.local"); + let room_id = room_id!("!room_a:dummy.local"); let wedged_event_transaction_id = TransactionId::new(); let local_event_transaction_id = TransactionId::new(); @@ -2277,38 +2287,33 @@ mod migration_tests { let wedge_tx = wedged_event_transaction_id.clone(); let local_tx = local_event_transaction_id.clone(); - conn.with_transaction(move |txn| { - add_send_queue_event_v7(&db, txn, &wedge_tx, room_a_id, true)?; - add_send_queue_event_v7(&db, txn, &local_tx, room_a_id, false)?; + db.save_dependent_queued_request( + room_id, + &local_tx, + ChildTransactionId::new(), + DependentQueuedRequestKind::RedactEvent, + ) + .await + .unwrap(); + conn.with_transaction(move |txn| { + add_send_queue_event_v7(&db, txn, &wedge_tx, room_id, true)?; + add_send_queue_event_v7(&db, txn, &local_tx, room_id, false)?; Result::<_, Error>::Ok(()) }) .await .unwrap(); } - // This transparently migrates to the latest version. 
+ // This transparently migrates to the latest version, which clears up all + // requests and dependent requests. let store = SqliteStateStore::open(path, Some(SECRET)).await.unwrap(); - let queued_event = store.load_send_queue_events(room_a_id).await.unwrap(); - - assert_eq!(queued_event.len(), 2); - let migrated_wedged = - queued_event.iter().find(|e| e.transaction_id == wedged_event_transaction_id).unwrap(); - - assert!(migrated_wedged.is_wedged()); - assert_matches!( - migrated_wedged.error.clone(), - Some(QueueWedgeError::GenericApiError { .. }) - ); - - let migrated_ok = queued_event - .iter() - .find(|e| e.transaction_id == local_event_transaction_id.clone()) - .unwrap(); + let requests = store.load_send_queue_requests(room_id).await.unwrap(); + assert!(requests.is_empty()); - assert!(!migrated_ok.is_wedged()); - assert!(migrated_ok.error.is_none()); + let dependent_requests = store.load_dependent_queued_requests(room_id).await.unwrap(); + assert!(dependent_requests.is_empty()); } fn add_send_queue_event_v7( diff --git a/crates/matrix-sdk-ui/src/room_list_service/mod.rs b/crates/matrix-sdk-ui/src/room_list_service/mod.rs index 0fd2ba165f3..5a52f610757 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/mod.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/mod.rs @@ -120,21 +120,10 @@ impl RoomListService { /// already pre-configured. /// /// This won't start an encryption sync, and it's the user's responsibility - /// to create one in this case using `EncryptionSync`. + /// to create one in this case using + /// [`EncryptionSyncService`][crate::encryption_sync_service::EncryptionSyncService]. pub async fn new(client: Client) -> Result { - Self::new_internal(client, false).await - } - - /// Create a new `RoomList` that enables encryption. - /// - /// This will include syncing the encryption information, so there must not - /// be any instance of `EncryptionSync` running in the background. - pub async fn new_with_encryption(client: Client) -> Result { - Self::new_internal(client, true).await - } - - async fn new_internal(client: Client, with_encryption: bool) -> Result { - let mut builder = client + let builder = client .sliding_sync("room-list") .map_err(Error::SlidingSync)? .with_account_data_extension( @@ -146,17 +135,9 @@ impl RoomListService { })) .with_typing_extension(assign!(http::request::Typing::default(), { enabled: Some(true), - })); - - if with_encryption { - builder = builder - .with_e2ee_extension( - assign!(http::request::E2EE::default(), { enabled: Some(true) }), - ) - .with_to_device_extension( - assign!(http::request::ToDevice::default(), { enabled: Some(true) }), - ); - } + })) + // We don't deal with encryption device messages here so this is safe + .share_pos(); let sliding_sync = builder .add_cached_list( @@ -432,7 +413,7 @@ impl RoomListService { #[derive(Debug, Error)] pub enum Error { /// Error from [`matrix_sdk::SlidingSync`]. - #[error("SlidingSync failed: {0}")] + #[error(transparent)] SlidingSync(SlidingSyncError), /// An operation has been requested on an unknown list. 
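The `Error` enum above switches several variants from bespoke messages to `#[error(transparent)]`, which forwards both the `Display` output and the `source()` of the wrapped error, and pairs naturally with `#[from]`. A minimal sketch of the difference, using placeholder error types rather than the SDK's real ones:

```rust
use thiserror::Error;

#[derive(Debug, Error)]
#[error("the inner operation failed")]
struct InnerError;

#[derive(Debug, Error)]
enum ServiceError {
    /// Wraps the inner error with an extra message: `Display` prints this
    /// text, and the inner error stays reachable through `source()`.
    #[error("sliding sync failed: {0}")]
    Wrapped(#[source] InnerError),

    /// Forwards `Display` and `source()` straight to the inner error; the
    /// `#[from]` attribute also derives `From<InnerError>`, so `?` converts
    /// automatically.
    #[error(transparent)]
    Transparent(#[from] InnerError),
}

fn main() {
    let wrapped = ServiceError::Wrapped(InnerError);
    let transparent = ServiceError::from(InnerError);

    assert_eq!(wrapped.to_string(), "sliding sync failed: the inner operation failed");
    assert_eq!(transparent.to_string(), "the inner operation failed");
}
```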
@@ -446,10 +427,10 @@ pub enum Error { #[error("A timeline instance already exists for room {0}")] TimelineAlreadyExists(OwnedRoomId), - #[error("An error occurred while initializing the timeline")] - InitializingTimeline(#[source] timeline::Error), + #[error(transparent)] + InitializingTimeline(#[from] timeline::Error), - #[error("The attached event cache ran into an error")] + #[error(transparent)] EventCache(#[from] EventCacheError), } @@ -577,23 +558,6 @@ mod tests { Ok(()) } - #[async_test] - async fn test_no_to_device_and_e2ee_if_not_explicitly_set() -> Result<(), Error> { - let (client, _) = new_client().await; - - let no_encryption = RoomListService::new(client.clone()).await?; - let extensions = no_encryption.sliding_sync.extensions_config(); - assert_eq!(extensions.e2ee.enabled, None); - assert_eq!(extensions.to_device.enabled, None); - - let with_encryption = RoomListService::new_with_encryption(client).await?; - let extensions = with_encryption.sliding_sync.extensions_config(); - assert_eq!(extensions.e2ee.enabled, Some(true)); - assert_eq!(extensions.to_device.enabled, Some(true)); - - Ok(()) - } - #[async_test] async fn test_expire_sliding_sync_session_manually() -> Result<(), Error> { let (client, server) = new_client().await; diff --git a/crates/matrix-sdk-ui/src/room_list_service/room_list.rs b/crates/matrix-sdk-ui/src/room_list_service/room_list.rs index 8b4ecf9ab4c..8056c14dac8 100644 --- a/crates/matrix-sdk-ui/src/room_list_service/room_list.rs +++ b/crates/matrix-sdk-ui/src/room_list_service/room_list.rs @@ -114,8 +114,10 @@ impl RoomList { } /// Get a subscriber to the room list loading state. + /// + /// This method will send out the current loading state as the first update. pub fn loading_state(&self) -> Subscriber { - self.loading_state.subscribe() + self.loading_state.subscribe_reset() } /// Get a stream of rooms. @@ -144,8 +146,8 @@ impl RoomList { pub fn entries_with_dynamic_adapters( &self, page_size: usize, - room_info_notable_update_receiver: broadcast::Receiver, ) -> (impl Stream>> + '_, RoomListDynamicEntriesController) { + let room_info_notable_update_receiver = self.client.room_info_notable_update_receiver(); let list = self.sliding_sync_list.clone(); let filter_fn_cell = AsyncCell::shared(); diff --git a/crates/matrix-sdk-ui/src/timeline/event_handler.rs b/crates/matrix-sdk-ui/src/timeline/event_handler.rs index 228eb96e606..20358cae735 100644 --- a/crates/matrix-sdk-ui/src/timeline/event_handler.rs +++ b/crates/matrix-sdk-ui/src/timeline/event_handler.rs @@ -547,25 +547,7 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { let internal_id = item.internal_id.to_owned(); // Update all events that replied to this message with the edited content. 
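The duplicated reply-updating loops in this hunk get folded into a single `maybe_update_responses` helper that is reused for edits, redactions and late decryptions. A standalone sketch of that idea follows, using hypothetical flattened types in place of the SDK's observable vector and timeline items:

```rust
// Hypothetical, simplified stand-ins for the timeline item types; the real
// code walks an ObservableVectorTransaction of Arc<TimelineItem> instead.
struct ReplyDetails {
    event_id: String,
    replied_to_body: Option<String>,
}

struct Item {
    event_id: String,
    in_reply_to: Option<ReplyDetails>,
}

/// After the event `target_event_id` changes (edit, redaction or successful
/// decryption), refresh every item replying to it and return the ids of the
/// replies that were updated.
fn maybe_update_responses(
    items: &mut [Item],
    target_event_id: &str,
    new_replied_to_body: &str,
) -> Vec<String> {
    let mut updated = Vec::new();
    for item in items.iter_mut() {
        if let Some(reply) = item.in_reply_to.as_mut() {
            if reply.event_id == target_event_id {
                reply.replied_to_body = Some(new_replied_to_body.to_owned());
                updated.push(item.event_id.clone());
            }
        }
    }
    updated
}

fn main() {
    let mut items = vec![
        Item { event_id: "$original".to_owned(), in_reply_to: None },
        Item {
            event_id: "$reply".to_owned(),
            in_reply_to: Some(ReplyDetails {
                event_id: "$original".to_owned(),
                replied_to_body: None,
            }),
        },
    ];

    // "$original" was just edited (or decrypted); refresh its replies.
    let updated = maybe_update_responses(&mut items, "$original", "It's a secret to everybody");

    assert_eq!(updated, vec!["$reply".to_owned()]);
    assert_eq!(
        items[1].in_reply_to.as_ref().unwrap().replied_to_body.as_deref(),
        Some("It's a secret to everybody")
    );
}
```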
- self.items.for_each(|mut entry| { - let Some(event_item) = entry.as_event() else { return }; - let Some(message) = event_item.content.as_message() else { return }; - let Some(in_reply_to) = message.in_reply_to() else { return }; - if replacement.event_id == in_reply_to.event_id { - trace!(reply_event_id = ?event_item.identifier(), "Updating response to edited event"); - let in_reply_to = InReplyToDetails { - event_id: in_reply_to.event_id.clone(), - event: TimelineDetails::Ready(Box::new( - RepliedToEvent::from_timeline_item(&new_item), - )), - }; - let new_reply_content = - TimelineItemContent::Message(message.with_in_reply_to(in_reply_to)); - let new_reply_item = - entry.with_kind(event_item.with_content(new_reply_content, None)); - ObservableVectorTransactionEntry::set(&mut entry, new_reply_item); - } - }); + Self::maybe_update_responses(self.items, &replacement.event_id, &new_item); // Update the event itself. self.items.set(item_pos, TimelineItem::new(new_item, internal_id)); @@ -945,7 +927,13 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { debug!("event item is already redacted"); } else { let new_item = item.redact(&self.meta.room_version); - self.items.set(idx, TimelineItem::new(new_item, item.internal_id.to_owned())); + let internal_id = item.internal_id.to_owned(); + + // Look for any timeline event that's a reply to the redacted event, and redact + // the replied-to event there as well. + Self::maybe_update_responses(self.items, &redacted, &new_item); + + self.items.set(idx, TimelineItem::new(new_item, internal_id)); self.result.items_updated += 1; } } else { @@ -954,26 +942,6 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { } else { debug!("Timeline item not found, discarding redaction"); }; - - // Look for any timeline event that's a reply to the redacted event, and redact - // the replied-to event there as well. - self.items.for_each(|mut entry| { - let Some(event_item) = entry.as_event() else { return }; - let Some(message) = event_item.content.as_message() else { return }; - let Some(in_reply_to) = message.in_reply_to() else { return }; - let TimelineDetails::Ready(replied_to_event) = &in_reply_to.event else { return }; - if redacted == in_reply_to.event_id { - let replied_to_event = replied_to_event.redact(&self.meta.room_version); - let in_reply_to = InReplyToDetails { - event_id: in_reply_to.event_id.clone(), - event: TimelineDetails::Ready(Box::new(replied_to_event)), - }; - let content = TimelineItemContent::Message(message.with_in_reply_to(in_reply_to)); - let new_item = entry.with_kind(event_item.with_content(content, None)); - - ObservableVectorTransactionEntry::set(&mut entry, new_item); - } - }); } /// Attempts to redact a reaction, local or remote. @@ -1190,8 +1158,16 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { } } - Flow::Remote { position: TimelineItemPosition::Update(idx), .. } => { + Flow::Remote { + event_id: decrypted_event_id, + position: TimelineItemPosition::Update(idx), + .. + } => { trace!("Updating timeline item at position {idx}"); + + // Update all events that replied to this previously encrypted message. + Self::maybe_update_responses(self.items, decrypted_event_id, &item); + let internal_id = self.items[*idx].internal_id.clone(); self.items.set(*idx, TimelineItem::new(item, internal_id)); } @@ -1203,6 +1179,34 @@ impl<'a, 'o> TimelineEventHandler<'a, 'o> { } } + /// After updating the timeline item `new_item` which id is + /// `target_event_id`, update other items that are responses to this item. 
+ fn maybe_update_responses( + items: &mut ObservableVectorTransaction<'_, Arc>, + target_event_id: &EventId, + new_item: &EventTimelineItem, + ) { + items.for_each(|mut entry| { + let Some(event_item) = entry.as_event() else { return }; + let Some(message) = event_item.content.as_message() else { return }; + let Some(in_reply_to) = message.in_reply_to() else { return }; + if target_event_id == in_reply_to.event_id { + trace!(reply_event_id = ?event_item.identifier(), "Updating response to edited event"); + let in_reply_to = InReplyToDetails { + event_id: in_reply_to.event_id.clone(), + event: TimelineDetails::Ready(Box::new( + RepliedToEvent::from_timeline_item(new_item), + )), + }; + let new_reply_content = + TimelineItemContent::Message(message.with_in_reply_to(in_reply_to)); + let new_reply_item = + entry.with_kind(event_item.with_content(new_reply_content, None)); + ObservableVectorTransactionEntry::set(&mut entry, new_reply_item); + } + }); + } + fn pending_reactions( &mut self, content: &TimelineItemContent, diff --git a/crates/matrix-sdk-ui/src/timeline/event_item/content/message.rs b/crates/matrix-sdk-ui/src/timeline/event_item/content/message.rs index 4e361e3862b..09ee071f923 100644 --- a/crates/matrix-sdk-ui/src/timeline/event_item/content/message.rs +++ b/crates/matrix-sdk-ui/src/timeline/event_item/content/message.rs @@ -35,7 +35,7 @@ use ruma::{ }, html::RemoveReplyFallback, serde::Raw, - OwnedEventId, OwnedUserId, RoomVersionId, UserId, + OwnedEventId, OwnedUserId, UserId, }; use tracing::{error, trace}; @@ -337,14 +337,6 @@ impl RepliedToEvent { } } - pub(in crate::timeline) fn redact(&self, room_version: &RoomVersionId) -> Self { - Self { - content: self.content.redact(room_version), - sender: self.sender.clone(), - sender_profile: self.sender_profile.clone(), - } - } - /// Try to create a `RepliedToEvent` from a `TimelineEvent` by providing the /// room. pub async fn try_from_timeline_event_for_room( diff --git a/crates/matrix-sdk-ui/src/timeline/mod.rs b/crates/matrix-sdk-ui/src/timeline/mod.rs index 93168e1732a..b5bd98b10c2 100644 --- a/crates/matrix-sdk-ui/src/timeline/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/mod.rs @@ -729,10 +729,18 @@ impl Timeline { /// Adds a new pinned event by sending an updated `m.room.pinned_events` /// event containing the new event id. /// - /// Returns `true` if we sent the request, `false` if the event was already + /// This method will first try to get the pinned events from the current + /// room's state and if it fails to do so it'll try to load them from the + /// homeserver. + /// + /// Returns `true` if we pinned the event, `false` if the event was already /// pinned. pub async fn pin_event(&self, event_id: &EventId) -> Result { - let mut pinned_event_ids = self.room().pinned_event_ids(); + let mut pinned_event_ids = if let Some(event_ids) = self.room().pinned_event_ids() { + event_ids + } else { + self.room().load_pinned_events().await?.unwrap_or_default() + }; let event_id = event_id.to_owned(); if pinned_event_ids.contains(&event_id) { Ok(false) @@ -744,13 +752,21 @@ impl Timeline { } } - /// Adds a new pinned event by sending an updated `m.room.pinned_events` + /// Removes a pinned event by sending an updated `m.room.pinned_events` /// event without the event id we want to remove. /// - /// Returns `true` if we sent the request, `false` if the event wasn't - /// pinned. 
+ /// This method will first try to get the pinned events from the current + /// room's state and if it fails to do so it'll try to load them from the + /// homeserver. + /// + /// Returns `true` if we unpinned the event, `false` if the event wasn't + /// pinned before. pub async fn unpin_event(&self, event_id: &EventId) -> Result { - let mut pinned_event_ids = self.room().pinned_event_ids(); + let mut pinned_event_ids = if let Some(event_ids) = self.room().pinned_event_ids() { + event_ids + } else { + self.room().load_pinned_events().await?.unwrap_or_default() + }; let event_id = event_id.to_owned(); if let Some(idx) = pinned_event_ids.iter().position(|e| *e == *event_id) { pinned_event_ids.remove(idx); diff --git a/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs b/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs index 450ccae0e94..1deb853607d 100644 --- a/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs +++ b/crates/matrix-sdk-ui/src/timeline/pinned_events_loader.rs @@ -61,6 +61,7 @@ impl PinnedEventsLoader { let pinned_event_ids: Vec = self .room .pinned_event_ids() + .unwrap_or_default() .into_iter() .rev() .take(self.max_events_to_load) @@ -134,7 +135,7 @@ pub trait PinnedEventsRoom: SendOutsideWasm + SyncOutsideWasm { ) -> BoxFuture<'a, Result<(SyncTimelineEvent, Vec), PaginatorError>>; /// Get the pinned event ids for a room. - fn pinned_event_ids(&self) -> Vec; + fn pinned_event_ids(&self) -> Option>; /// Checks whether an event id is pinned in this room. /// @@ -168,7 +169,7 @@ impl PinnedEventsRoom for Room { .boxed() } - fn pinned_event_ids(&self) -> Vec { + fn pinned_event_ids(&self) -> Option> { self.clone_info().pinned_event_ids() } diff --git a/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs b/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs index 0bf9d80d4ce..d7e0f812d6f 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/encryption.rs @@ -20,17 +20,19 @@ use std::{ sync::{Arc, Mutex}, }; +use as_variant::as_variant; use assert_matches::assert_matches; use assert_matches2::assert_let; use eyeball_im::VectorDiff; use matrix_sdk::{ + assert_next_matches_with_timeout, crypto::{decrypt_room_key_export, types::events::UtdCause, OlmMachine}, test_utils::test_client_builder, }; use matrix_sdk_base::deserialized_responses::{SyncTimelineEvent, UnableToDecryptReason}; -use matrix_sdk_test::{async_test, BOB}; +use matrix_sdk_test::{async_test, ALICE, BOB}; use ruma::{ - assign, + assign, event_id, events::room::encrypted::{ EncryptedEventScheme, MegolmV1AesSha2ContentInit, Relation, Replacement, RoomEncryptedEventContent, @@ -44,7 +46,7 @@ use stream_assert::assert_next_matches; use super::TestTimeline; use crate::{ - timeline::{EncryptedMessage, TimelineItemContent}, + timeline::{EncryptedMessage, TimelineDetails, TimelineItemContent}, unable_to_decrypt_hook::{UnableToDecryptHook, UnableToDecryptInfo, UtdHookManager}, }; @@ -557,6 +559,128 @@ async fn test_utd_cause_for_missing_membership_is_unknown() { assert_eq!(*cause, UtdCause::Unknown); } +#[async_test] +async fn test_retry_decryption_updates_response() { + const SESSION_ID: &str = "gM8i47Xhu0q52xLfgUXzanCMpLinoyVyH7R58cBuVBU"; + const SESSION_KEY: &[u8] = b"\ + -----BEGIN MEGOLM SESSION DATA-----\n\ + ASKcWoiAVUM97482UAi83Avce62hSLce7i5JhsqoF6xeAAAACqt2Cg3nyJPRWTTMXxXH7TXnkfdlmBXbQtq5\ + bpHo3LRijcq2Gc6TXilESCmJN14pIsfKRJrWjZ0squ/XsoTFytuVLWwkNaW3QF6obeg2IoVtJXLMPdw3b2vO\ + 
vgwGY3OMP0XafH13j1vcb6YLzvgLkZQLnYvd47hv3yK/9GmKS9tokuaQ7dCVYckYcIOS09EDTs70YdxUd5WG\ + rQynATCLFP1p/NAGv70r9MK7Cy/mNpjD0r4qC7UEDIoi1kOWzHgnLo19wtvwsb8Fg8ATxcs3Wmtj8hIUYpDx\ + ia4sM10zbytUuaPUAfCDf42IyxdmOnGe1CueXhgI71y+RW0s0argNqUt7jB70JT0o9CyX6UBGRaqLk2MPY9T\ + hUu5J8X3UgIa6rcbWigzohzWm9rdbEHFrSWqjpfQYMaAKQQgETrjSy4XTrp2RhC2oNqG/hylI4ab+F4X6fpH\ + DYP1NqNMP5g36xNu7LhDnrUB5qsPjYOmWORxGLfudpF3oLYCSlr3DgHqEIB6HjQblLZ3KQuPBse3zxyROTnS\ + AhdPH4a/z1wioFtKNVph3hecsiKEdqnz4Y2coSIdhz58mJ9JWNQoFAENE5CSsoEZAGvafYZVpW4C75YY2zq1\ + wIeiFi1dT43/jLAUGkslsi1VvnyfUu8qO404RxYO3XHoGLMFoFLOO+lZ+VGci2Vz10AhxJhEBHxRKxw4k2uB\ + HztoSJUr/2Y\n\ + -----END MEGOLM SESSION DATA-----"; + + let timeline = TestTimeline::new(); + let mut stream = timeline.subscribe_events().await; + + let original_event_id = event_id!("$original"); + let f = &timeline.factory; + timeline + .handle_live_event( + f.event(RoomEncryptedEventContent::new( + EncryptedEventScheme::MegolmV1AesSha2( + MegolmV1AesSha2ContentInit { + ciphertext: "\ + AwgAEtABPRMavuZMDJrPo6pGQP4qVmpcuapuXtzKXJyi3YpEsjSWdzuRKIgJzD4P\ + cSqJM1A8kzxecTQNJsC5q22+KSFEPxPnI4ltpm7GFowSoPSW9+bFdnlfUzEP1jPq\ + YevHAsMJp2fRKkzQQbPordrUk1gNqEpGl4BYFeRqKl9GPdKFwy45huvQCLNNueql\ + CFZVoYMuhxrfyMiJJAVNTofkr2um2mKjDTlajHtr39pTG8k0eOjSXkLOSdZvNOMz\ + hGhSaFNeERSA2G2YbeknOvU7MvjiO0AKuxaAe1CaVhAI14FCgzrJ8g0y5nly+n7x\ + QzL2G2Dn8EoXM5Iqj8W99iokQoVsSrUEnaQ1WnSIfewvDDt4LCaD/w7PGETMCQ" + .to_owned(), + sender_key: "DeHIg4gwhClxzFYcmNntPNF9YtsdZbmMy8+3kzCMXHA".to_owned(), + device_id: "NLAZCWIOCO".into(), + session_id: SESSION_ID.into(), + } + .into(), + ), + None, + )) + .event_id(original_event_id) + .sender(&BOB) + .into_utd_sync_timeline_event(), + ) + .await; + + timeline + .handle_live_event(f.text_msg("well said!").reply_to(original_event_id).sender(&ALICE)) + .await; + + // We receive the UTD. + { + let event = assert_next_matches!(stream, VectorDiff::PushBack { value } => value); + assert_let!( + TimelineItemContent::UnableToDecrypt(EncryptedMessage::MegolmV1AesSha2 { + session_id, + .. + }) = event.content() + ); + assert_eq!(session_id, SESSION_ID); + } + + // We receive the text response. + { + let event = assert_next_matches!(stream, VectorDiff::PushBack { value } => value); + let msg = event.content().as_message().unwrap(); + assert_eq!(msg.body(), "well said!"); + + let reply_details = msg.in_reply_to().unwrap(); + assert_eq!(reply_details.event_id, original_event_id); + + let replied_to = as_variant!(&reply_details.event, TimelineDetails::Ready).unwrap(); + assert!(replied_to.content.as_unable_to_decrypt().is_some()); + } + + // Import a room key backup. + let own_user_id = user_id!("@example:morheus.localhost"); + let exported_keys = decrypt_room_key_export(Cursor::new(SESSION_KEY), "1234").unwrap(); + + let olm_machine = OlmMachine::new(own_user_id, "SomeDeviceId".into()).await; + olm_machine.store().import_exported_room_keys(exported_keys, |_, _| {}).await.unwrap(); + + // Retry decrypting the UTD. + timeline + .controller + .retry_event_decryption_test( + room_id!("!DovneieKSTkdHKpIXy:morpheus.localhost"), + olm_machine, + Some(iter::once(SESSION_ID.to_owned()).collect()), + ) + .await; + + // The response is updated. 
+ { + let event = assert_next_matches_with_timeout!( + stream, + VectorDiff::Set { index: 1, value } => value + ); + + let msg = event.content().as_message().unwrap(); + assert_eq!(msg.body(), "well said!"); + + let reply_details = msg.in_reply_to().unwrap(); + assert_eq!(reply_details.event_id, original_event_id); + + let replied_to = as_variant!(&reply_details.event, TimelineDetails::Ready).unwrap(); + assert_eq!(replied_to.content.as_message().unwrap().body(), "It's a secret to everybody"); + } + + // The event itself is decrypted. + { + let event = assert_next_matches!(stream, VectorDiff::Set { index: 0, value } => value); + assert_matches!(event.encryption_info(), Some(_)); + assert_let!(TimelineItemContent::Message(message) = event.content()); + assert_eq!(message.body(), "It's a secret to everybody"); + assert!(!event.is_highlighted()); + } +} + fn utd_event_with_unsigned(unsigned: serde_json::Value) -> SyncTimelineEvent { let raw = Raw::from_json( to_raw_value(&json!({ diff --git a/crates/matrix-sdk-ui/src/timeline/tests/mod.rs b/crates/matrix-sdk-ui/src/timeline/tests/mod.rs index 26cadd3b6dc..356a2ad2228 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/mod.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/mod.rs @@ -351,7 +351,7 @@ impl PinnedEventsRoom for TestRoomDataProvider { unimplemented!(); } - fn pinned_event_ids(&self) -> Vec { + fn pinned_event_ids(&self) -> Option> { unimplemented!(); } diff --git a/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs b/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs index a42b6f3afcf..e592660f074 100644 --- a/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs +++ b/crates/matrix-sdk-ui/src/timeline/tests/redaction.rs @@ -91,17 +91,17 @@ async fn test_redact_replied_to_event() { timeline.handle_live_event(f.redaction(first_item.event_id().unwrap()).sender(&ALICE)).await; - let first_item_again = - assert_next_matches!(stream, VectorDiff::Set { index: 0, value } => value); - assert_matches!(first_item_again.content(), TimelineItemContent::RedactedMessage); - assert_matches!(first_item_again.original_json(), None); - let second_item_again = assert_next_matches!(stream, VectorDiff::Set { index: 1, value } => value); let message = second_item_again.content().as_message().unwrap(); let in_reply_to = message.in_reply_to().unwrap(); assert_let!(TimelineDetails::Ready(replied_to_event) = &in_reply_to.event); assert_matches!(replied_to_event.content(), TimelineItemContent::RedactedMessage); + + let first_item_again = + assert_next_matches!(stream, VectorDiff::Set { index: 0, value } => value); + assert_matches!(first_item_again.content(), TimelineItemContent::RedactedMessage); + assert_matches!(first_item_again.original_json(), None); } #[async_test] diff --git a/crates/matrix-sdk-ui/tests/integration/room_list_service.rs b/crates/matrix-sdk-ui/tests/integration/room_list_service.rs index f586c528927..0c9429203e3 100644 --- a/crates/matrix-sdk-ui/tests/integration/room_list_service.rs +++ b/crates/matrix-sdk-ui/tests/integration/room_list_service.rs @@ -6,7 +6,11 @@ use std::{ use assert_matches::assert_matches; use eyeball_im::VectorDiff; use futures_util::{pin_mut, FutureExt, StreamExt}; -use matrix_sdk::{test_utils::logged_in_client_with_server, Client}; +use matrix_sdk::{ + config::RequestConfig, + test_utils::{logged_in_client_with_server, set_client_session, test_client_builder}, + Client, +}; use matrix_sdk_base::sync::UnreadNotificationsCount; use matrix_sdk_test::{async_test, mocks::mock_encryption_state}; use 
matrix_sdk_ui::{ @@ -23,6 +27,7 @@ use ruma::{ }; use serde_json::json; use stream_assert::{assert_next_matches, assert_pending}; +use tempfile::TempDir; use tokio::{spawn, sync::mpsc::channel, task::yield_now}; use wiremock::{ matchers::{header, method, path}, @@ -38,12 +43,30 @@ async fn new_room_list_service() -> Result<(Client, MockServer, RoomListService) Ok((client, server, room_list)) } +async fn new_persistent_room_list_service( + store_path: &std::path::Path, +) -> Result<(MockServer, RoomListService), Error> { + let server = MockServer::start().await; + let client = test_client_builder(Some(server.uri().to_string())) + .request_config(RequestConfig::new().disable_retry()) + .sqlite_store(store_path, None) + .build() + .await + .unwrap(); + set_client_session(&client).await; + + let room_list = RoomListService::new(client.clone()).await?; + + Ok((server, room_list)) +} + // Same macro as in the main, with additional checking that the state // before/after the sync loop match those we expect. macro_rules! sync_then_assert_request_and_fake_response { ( [$server:ident, $room_list:ident, $stream:ident] $( states = $pre_state:pat => $post_state:pat, )? + $( assert pos $pos:expr, )? assert request $assert_request:tt { $( $request_json:tt )* }, respond with = $( ( code $code:expr ) )? { $( $response_json:tt )* } $( , after delay = $response_delay:expr )? @@ -53,6 +76,7 @@ macro_rules! sync_then_assert_request_and_fake_response { [$server, $room_list, $stream] sync matches Some(Ok(_)), $( states = $pre_state => $post_state, )? + $( assert pos $pos, )? assert request $assert_request { $( $request_json )* }, respond with = $( ( code $code ) )? { $( $response_json )* }, $( after delay = $response_delay, )? @@ -63,6 +87,7 @@ macro_rules! sync_then_assert_request_and_fake_response { [$server:ident, $room_list:ident, $stream:ident] sync matches $sync_result:pat, $( states = $pre_state:pat => $post_state:pat, )? + $( assert pos $pos:expr, )? assert request $assert_request:tt { $( $request_json:tt )* }, respond with = $( ( code $code:expr ) )? { $( $response_json:tt )* } $( , after delay = $response_delay:expr )? @@ -80,6 +105,7 @@ macro_rules! sync_then_assert_request_and_fake_response { let next = super::sliding_sync_then_assert_request_and_fake_response! { [$server, $stream] sync matches $sync_result, + $( assert pos $pos, )? assert request $assert_request { $( $request_json )* }, respond with = $( ( code $code ) )? { $( $response_json )* }, $( after delay = $response_delay, )? @@ -481,6 +507,7 @@ async fn test_sync_resumes_from_previous_state() -> Result<(), Error> { sync_then_assert_request_and_fake_response! { [server, room_list, sync] states = Init => SettingUp, + assert pos None::, assert request >= { "lists": { ALL_ROOMS: { @@ -509,6 +536,7 @@ async fn test_sync_resumes_from_previous_state() -> Result<(), Error> { sync_then_assert_request_and_fake_response! { [server, room_list, sync] states = SettingUp => Running, + assert pos Some("0"), assert request >= { "lists": { ALL_ROOMS: { @@ -537,6 +565,7 @@ async fn test_sync_resumes_from_previous_state() -> Result<(), Error> { sync_then_assert_request_and_fake_response! 
{ [server, room_list, sync] states = Running => Running, + assert pos Some("1"), assert request >= { "lists": { ALL_ROOMS: { @@ -560,6 +589,101 @@ async fn test_sync_resumes_from_previous_state() -> Result<(), Error> { Ok(()) } +#[async_test] +async fn test_sync_resumes_from_previous_state_after_restart() -> Result<(), Error> { + let tmp_dir = TempDir::new().unwrap(); + let store_path = tmp_dir.path(); + + { + let (server, room_list) = new_persistent_room_list_service(store_path).await?; + let sync = room_list.sync(); + pin_mut!(sync); + + let all_rooms = room_list.all_rooms().await?; + let mut all_rooms_loading_state = all_rooms.loading_state(); + + // The loading is not loaded. + assert_next_matches!(all_rooms_loading_state, RoomListLoadingState::NotLoaded); + assert_pending!(all_rooms_loading_state); + + sync_then_assert_request_and_fake_response! { + [server, room_list, sync] + states = Init => SettingUp, + assert pos None::, + assert request >= { + "lists": { + ALL_ROOMS: { + "ranges": [[0, 19]], + }, + }, + }, + respond with = { + "pos": "0", + "lists": { + ALL_ROOMS: { + "count": 10, + }, + }, + "rooms": {}, + }, + }; + } + + { + let (server, room_list) = new_persistent_room_list_service(store_path).await?; + let sync = room_list.sync(); + pin_mut!(sync); + + let all_rooms = room_list.all_rooms().await?; + let mut all_rooms_loading_state = all_rooms.loading_state(); + + // Wait on Tokio to run all the tasks. Necessary only when testing. + yield_now().await; + + // We already have a state stored so the list should already be loaded + assert_next_matches!( + all_rooms_loading_state, + RoomListLoadingState::Loaded { maximum_number_of_rooms: Some(10) } + ); + assert_pending!(all_rooms_loading_state); + + // pos has been restored and is used when doing the req + sync_then_assert_request_and_fake_response! { + [server, room_list, sync] + states = Init => SettingUp, + assert pos Some("0"), + assert request >= { + "lists": { + ALL_ROOMS: { + "ranges": [[0, 19]], + }, + }, + }, + respond with = { + "pos": "1", + "lists": { + ALL_ROOMS: { + "count": 12, + }, + }, + "rooms": {}, + }, + }; + + // Wait on Tokio to run all the tasks. Necessary only when testing. + yield_now().await; + + // maximum_number_of_rooms changed so we should get a new loaded state + assert_next_matches!( + all_rooms_loading_state, + RoomListLoadingState::Loaded { maximum_number_of_rooms: Some(12) } + ); + assert_pending!(all_rooms_loading_state); + } + + Ok(()) +} + #[async_test] async fn test_sync_resumes_from_error() -> Result<(), Error> { let (_, server, room_list) = new_room_list_service().await?; @@ -1046,8 +1170,7 @@ async fn test_loading_states() -> Result<(), Error> { let mut all_rooms_loading_state = all_rooms.loading_state(); // The loading is not loaded. - assert_matches!(all_rooms_loading_state.get(), RoomListLoadingState::NotLoaded); - assert_pending!(all_rooms_loading_state); + assert_next_matches!(all_rooms_loading_state, RoomListLoadingState::NotLoaded); sync_then_assert_request_and_fake_response! { [server, room_list, sync] @@ -1153,11 +1276,10 @@ async fn test_loading_states() -> Result<(), Error> { pin_mut!(sync); // The loading state is loaded! Indeed, there is data loaded from the cache. - assert_matches!( - all_rooms_loading_state.get(), + assert_next_matches!( + all_rooms_loading_state, RoomListLoadingState::Loaded { maximum_number_of_rooms: Some(12) } ); - assert_pending!(all_rooms_loading_state); sync_then_assert_request_and_fake_response! 
{ [server, room_list, sync] @@ -1196,15 +1318,14 @@ async fn test_loading_states() -> Result<(), Error> { #[async_test] async fn test_dynamic_entries_stream() -> Result<(), Error> { - let (client, server, room_list) = new_room_list_service().await?; + let (_client, server, room_list) = new_room_list_service().await?; let sync = room_list.sync(); pin_mut!(sync); let all_rooms = room_list.all_rooms().await?; - let (dynamic_entries_stream, dynamic_entries) = - all_rooms.entries_with_dynamic_adapters(5, client.room_info_notable_update_receiver()); + let (dynamic_entries_stream, dynamic_entries) = all_rooms.entries_with_dynamic_adapters(5); pin_mut!(dynamic_entries_stream); sync_then_assert_request_and_fake_response! { @@ -1599,15 +1720,14 @@ async fn test_dynamic_entries_stream() -> Result<(), Error> { #[async_test] async fn test_room_sorting() -> Result<(), Error> { - let (client, server, room_list) = new_room_list_service().await?; + let (_client, server, room_list) = new_room_list_service().await?; let sync = room_list.sync(); pin_mut!(sync); let all_rooms = room_list.all_rooms().await?; - let (stream, dynamic_entries) = - all_rooms.entries_with_dynamic_adapters(10, client.room_info_notable_update_receiver()); + let (stream, dynamic_entries) = all_rooms.entries_with_dynamic_adapters(10); pin_mut!(stream); sync_then_assert_request_and_fake_response! { diff --git a/crates/matrix-sdk-ui/tests/integration/sliding_sync.rs b/crates/matrix-sdk-ui/tests/integration/sliding_sync.rs index b267fa96bb7..5ec1dda2c00 100644 --- a/crates/matrix-sdk-ui/tests/integration/sliding_sync.rs +++ b/crates/matrix-sdk-ui/tests/integration/sliding_sync.rs @@ -58,6 +58,7 @@ impl Match for SlidingSyncMatcher { macro_rules! sliding_sync_then_assert_request_and_fake_response { ( [$server:ident, $stream:ident] + $( assert pos $pos:expr, )? assert request $sign:tt { $( $request_json:tt )* }, respond with = $( ( code $code:expr ) )? { $( $response_json:tt )* } $( , after delay = $response_delay:expr )? @@ -66,6 +67,7 @@ macro_rules! sliding_sync_then_assert_request_and_fake_response { sliding_sync_then_assert_request_and_fake_response! { [$server, $stream] sync matches Some(Ok(_)), + $( assert pos $pos, )? assert request $sign { $( $request_json )* }, respond with = $( ( code $code ) )? { $( $response_json )* }, $( after delay = $response_delay, )? @@ -75,6 +77,7 @@ macro_rules! sliding_sync_then_assert_request_and_fake_response { ( [$server:ident, $stream:ident] sync matches $sync_result:pat, + $( assert pos $pos:expr, )? assert request $sign:tt { $( $request_json:tt )* }, respond with = $( ( code $code:expr ) )? { $( $response_json:tt )* } $( , after delay = $response_delay:expr )? @@ -117,6 +120,14 @@ macro_rules! sliding_sync_then_assert_request_and_fake_response { root.remove("txn_id"); } + // Validate `pos` from the query parameter if specified. + $( + match $pos { + Some(pos) => assert!(wiremock::matchers::query_param("pos", pos).matches(request)), + None => assert!(wiremock::matchers::query_param_is_missing("pos").matches(request)), + } + )? 
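The new `assert pos` arm validates the `pos` query parameter with wiremock's `query_param` and `query_param_is_missing` matchers. The same matchers can also be attached to a mock directly; here is a small self-contained sketch, where the `/sync` path and the response bodies are made up for illustration:

```rust
use wiremock::{
    matchers::{method, path, query_param, query_param_is_missing},
    Mock, MockServer, ResponseTemplate,
};

#[tokio::main]
async fn main() {
    let server = MockServer::start().await;

    // First request of a session: no `pos` query parameter is expected.
    Mock::given(method("POST"))
        .and(path("/sync"))
        .and(query_param_is_missing("pos"))
        .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "pos": "0" })))
        .expect(1)
        .mount(&server)
        .await;

    // Later requests must echo the previous `pos` back as a query parameter.
    Mock::given(method("POST"))
        .and(path("/sync"))
        .and(query_param("pos", "0"))
        .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!({ "pos": "1" })))
        .mount(&server)
        .await;

    // ... drive the client under test against `server.uri()` here ...
}
```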
+ if let Err(error) = assert_json_diff::assert_json_matches_no_panic( &json_value, &json!({ $( $request_json )* }), diff --git a/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs b/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs index e9d28a84e3a..b6bc4211140 100644 --- a/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs +++ b/crates/matrix-sdk-ui/tests/integration/timeline/mod.rs @@ -628,7 +628,7 @@ async fn test_pin_event_is_sent_successfully() { // Pinning a remote event succeeds. setup - .mock_response(ResponseTemplate::new(200).set_body_json(json!({ + .mock_pin_unpin_response(ResponseTemplate::new(200).set_body_json(json!({ "event_id": "$42" }))) .await; @@ -662,7 +662,7 @@ async fn test_pin_event_is_returning_an_error() { assert!(!timeline.items().await.is_empty()); // Pinning a remote event fails. - setup.mock_response(ResponseTemplate::new(400)).await; + setup.mock_pin_unpin_response(ResponseTemplate::new(400)).await; let event_id = setup.event_id(); assert!(timeline.pin_event(event_id).await.is_err()); @@ -680,7 +680,7 @@ async fn test_unpin_event_is_sent_successfully() { // Unpinning a remote event succeeds. setup - .mock_response(ResponseTemplate::new(200).set_body_json(json!({ + .mock_pin_unpin_response(ResponseTemplate::new(200).set_body_json(json!({ "event_id": "$42" }))) .await; @@ -714,7 +714,7 @@ async fn test_unpin_event_is_returning_an_error() { assert!(!timeline.items().await.is_empty()); // Unpinning a remote event fails. - setup.mock_response(ResponseTemplate::new(400)).await; + setup.mock_pin_unpin_response(ResponseTemplate::new(400)).await; let event_id = setup.event_id(); assert!(timeline.unpin_event(event_id).await.is_err()); @@ -834,7 +834,13 @@ impl PinningTestSetup<'_> { let _response = client.sync_once(sync_settings.clone()).await.unwrap(); server.reset().await; - Self { event_id, room_id, client, server, sync_settings, sync_builder } + let setup = Self { event_id, room_id, client, server, sync_settings, sync_builder }; + + // This is necessary to get an empty list of pinned events when there are no + // pinned events state event in the required state + setup.mock_get_empty_pinned_events_state_response().await; + + setup } async fn timeline(&self) -> Timeline { @@ -847,7 +853,7 @@ impl PinningTestSetup<'_> { self.server.reset().await; } - async fn mock_response(&self, response: ResponseTemplate) { + async fn mock_pin_unpin_response(&self, response: ResponseTemplate) { Mock::given(method("PUT")) .and(path_regex(r"^/_matrix/client/r0/rooms/.*/state/m.room.pinned_events/.*?")) .and(header("authorization", "Bearer 1234")) @@ -856,6 +862,15 @@ impl PinningTestSetup<'_> { .await; } + async fn mock_get_empty_pinned_events_state_response(&self) { + Mock::given(method("GET")) + .and(path_regex(r"^/_matrix/client/r0/rooms/.*/state/m.room.pinned_events/.*")) + .and(header("authorization", "Bearer 1234")) + .respond_with(ResponseTemplate::new(404).set_body_json(json!({}))) + .mount(&self.server) + .await; + } + async fn mock_sync(&mut self, is_using_pinned_state_event: bool) { let f = EventFactory::new().sender(user_id!("@a:b.c")); let mut joined_room_builder = JoinedRoomBuilder::new(self.room_id) diff --git a/crates/matrix-sdk/Cargo.toml b/crates/matrix-sdk/Cargo.toml index cf356eb7114..27f91ed7ef4 100644 --- a/crates/matrix-sdk/Cargo.toml +++ b/crates/matrix-sdk/Cargo.toml @@ -82,6 +82,7 @@ eyeball-im = { workspace = true } eyre = { version = "0.6.8", optional = true } futures-core = { workspace = true } futures-util = { workspace = true } 
+growable-bloom-filter = { workspace = true } http = { workspace = true } imbl = { workspace = true, features = ["serde"] } indexmap = "2.0.2" diff --git a/crates/matrix-sdk/src/client/mod.rs b/crates/matrix-sdk/src/client/mod.rs index 7066fcba545..20268e75d39 100644 --- a/crates/matrix-sdk/src/client/mod.rs +++ b/crates/matrix-sdk/src/client/mod.rs @@ -160,6 +160,7 @@ pub(crate) struct ClientLocks { /// Look at the [`Account::mark_as_dm()`] method for a more detailed /// explanation. pub(crate) mark_as_dm_lock: Mutex<()>, + /// Lock ensuring that only a single secret store is getting opened at the /// same time. /// @@ -167,6 +168,7 @@ pub(crate) struct ClientLocks { /// default secret storage keys. #[cfg(feature = "e2e-encryption")] pub(crate) open_secret_store_lock: Mutex<()>, + /// Lock ensuring that we're only storing a single secret at a time. /// /// Take a look at the [`SecretStore::put_secret`] method for a more @@ -175,23 +177,29 @@ pub(crate) struct ClientLocks { /// [`SecretStore::put_secret`]: crate::encryption::secret_storage::SecretStore::put_secret #[cfg(feature = "e2e-encryption")] pub(crate) store_secret_lock: Mutex<()>, + /// Lock ensuring that only one method at a time might modify our backup. #[cfg(feature = "e2e-encryption")] pub(crate) backup_modify_lock: Mutex<()>, + /// Lock ensuring that we're going to attempt to upload backups for a single /// requester. #[cfg(feature = "e2e-encryption")] pub(crate) backup_upload_lock: Mutex<()>, + /// Handler making sure we only have one group session sharing request in /// flight per room. #[cfg(feature = "e2e-encryption")] pub(crate) group_session_deduplicated_handler: DeduplicatingHandler, + /// Lock making sure we're only doing one key claim request at a time. #[cfg(feature = "e2e-encryption")] pub(crate) key_claim_lock: Mutex<()>, + /// Handler to ensure that only one members request is running at a time, /// given a room. pub(crate) members_request_deduplicated_handler: DeduplicatingHandler, + /// Handler to ensure that only one encryption state request is running at a /// time, given a room. pub(crate) encryption_state_deduplicated_handler: DeduplicatingHandler, @@ -203,6 +211,7 @@ pub(crate) struct ClientLocks { #[cfg(feature = "e2e-encryption")] pub(crate) cross_process_crypto_store_lock: OnceCell>, + /// Latest "generation" of data known by the crypto store. 
/// /// This is a counter that only increments, set in the database (and can diff --git a/crates/matrix-sdk/src/encryption/mod.rs b/crates/matrix-sdk/src/encryption/mod.rs index b59c3e96d0a..a6ecc1f7754 100644 --- a/crates/matrix-sdk/src/encryption/mod.rs +++ b/crates/matrix-sdk/src/encryption/mod.rs @@ -459,7 +459,7 @@ impl Client { data: &[u8], thumbnail: Option, send_progress: SharedObservable, - ) -> Result<(MediaSource, Option, Option>)> { + ) -> Result<(MediaSource, Option<(MediaSource, Box)>)> { let upload_thumbnail = self.upload_encrypted_thumbnail(thumbnail, content_type, send_progress.clone()); @@ -470,10 +470,9 @@ impl Client { .await }; - let ((thumbnail_source, thumbnail_info), file) = - try_join(upload_thumbnail, upload_attachment).await?; + let (thumbnail, file) = try_join(upload_thumbnail, upload_attachment).await?; - Ok((MediaSource::Encrypted(Box::new(file)), thumbnail_source, thumbnail_info)) + Ok((MediaSource::Encrypted(Box::new(file)), thumbnail)) } /// Uploads an encrypted thumbnail to the media repository, and returns @@ -483,9 +482,9 @@ impl Client { thumbnail: Option, content_type: &mime::Mime, send_progress: SharedObservable, - ) -> Result<(Option, Option>)> { + ) -> Result)>> { let Some(thumbnail) = thumbnail else { - return Ok((None, None)); + return Ok(None); }; let mut cursor = Cursor::new(thumbnail.data); @@ -501,7 +500,7 @@ impl Client { mimetype: Some(thumbnail.content_type.as_ref().to_owned()) }); - Ok((Some(MediaSource::Encrypted(Box::new(file))), Some(Box::new(thumbnail_info)))) + Ok(Some((MediaSource::Encrypted(Box::new(file)), Box::new(thumbnail_info)))) } /// Claim one-time keys creating new Olm sessions. diff --git a/crates/matrix-sdk/src/event_cache/deduplicator.rs b/crates/matrix-sdk/src/event_cache/deduplicator.rs new file mode 100644 index 00000000000..b87dbd01d42 --- /dev/null +++ b/crates/matrix-sdk/src/event_cache/deduplicator.rs @@ -0,0 +1,270 @@ +// Copyright 2024 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Simple but efficient types to find duplicated events. See [`Deduplicator`] +//! to learn more. + +use std::{collections::BTreeSet, fmt, sync::Mutex}; + +use growable_bloom_filter::{GrowableBloom, GrowableBloomBuilder}; + +use super::room::events::{Event, RoomEvents}; + +/// `Deduplicator` is an efficient type to find duplicated events. +/// +/// It uses a [bloom filter] to provide a memory efficient probabilistic answer +/// to: “has event E been seen already?”. False positives are possible, while +/// false negatives are impossible. 
In the case of a positive reply, we fallback +/// to a linear (backward) search on all events to check whether it's a false +/// positive or not +/// +/// [bloom filter]: https://en.wikipedia.org/wiki/Bloom_filter +pub struct Deduplicator { + bloom_filter: Mutex, +} + +impl fmt::Debug for Deduplicator { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.debug_struct("Deduplicator").finish_non_exhaustive() + } +} + +impl Deduplicator { + const APPROXIMATED_MAXIMUM_NUMBER_OF_EVENTS: usize = 800_000; + const DESIRED_FALSE_POSITIVE_RATE: f64 = 0.001; + + /// Create a new `Deduplicator`. + pub fn new() -> Self { + Self { + bloom_filter: Mutex::new( + GrowableBloomBuilder::new() + .estimated_insertions(Self::APPROXIMATED_MAXIMUM_NUMBER_OF_EVENTS) + .desired_error_ratio(Self::DESIRED_FALSE_POSITIVE_RATE) + .build(), + ), + } + } + + /// Scan a collection of events and detect duplications. + /// + /// This method takes a collection of events `new_events_to_scan` and + /// returns a new collection of events, where each event is decorated by + /// a [`Decoration`], so that the caller can decide what to do with + /// these events. + /// + /// Each scanned event will update `Self`'s internal state. + /// + /// `existing_events` represents all events of a room that already exist. + pub fn scan_and_learn<'a, I>( + &'a self, + new_events_to_scan: I, + existing_events: &'a RoomEvents, + ) -> impl Iterator> + 'a + where + I: Iterator + 'a, + { + // `new_scanned_events` is not a field of `Self` because it is used only detect + // duplicates in `new_events_to_scan`. + let mut new_scanned_events = BTreeSet::new(); + + new_events_to_scan.map(move |event| { + let Some(event_id) = event.event_id() else { + // The event has no `event_id`. + return Decoration::Invalid(event); + }; + + if self.bloom_filter.lock().unwrap().check_and_set(&event_id) { + // Oh oh, it looks like we have found a duplicate! + // + // However, bloom filters have false positives. We are NOT sure the event is NOT + // present. Even if the false positive rate is low, we need to + // iterate over all events to ensure it isn't present. + + // First, let's ensure `event` is not a duplicate from `new_events_to_scan`, + // i.e. if the iterator itself contains duplicated events! We use a `BTreeSet`, + // otherwise using a bloom filter again may generate false positives. + if new_scanned_events.contains(&event_id) { + // The iterator contains a duplicated `event`. + return Decoration::Duplicated(event); + } + + // Second, we can iterate over all events to ensure `event` is not present in + // `existing_events`. + let duplicated = existing_events.revents().any(|(_position, other_event)| { + other_event.event_id().as_ref() == Some(&event_id) + }); + + new_scanned_events.insert(event_id); + + if duplicated { + Decoration::Duplicated(event) + } else { + Decoration::Unique(event) + } + } else { + new_scanned_events.insert(event_id); + + // Bloom filter has no false negatives. We are sure the event is NOT present: we + // can keep it in the iterator. + Decoration::Unique(event) + } + }) + } +} + +/// Information about the scanned collection of events. +#[derive(Debug)] +pub enum Decoration { + /// This event is not duplicated. + Unique(I), + + /// This event is duplicated. + Duplicated(I), + + /// This event is invalid (i.e. not well formed). 
+ Invalid(I), +} + +#[cfg(test)] +mod tests { + use assert_matches2::assert_let; + use matrix_sdk_base::deserialized_responses::SyncTimelineEvent; + use ruma::{owned_event_id, user_id, EventId}; + + use super::*; + use crate::test_utils::events::EventFactory; + + fn sync_timeline_event(event_id: &EventId) -> SyncTimelineEvent { + EventFactory::new() + .text_msg("") + .sender(user_id!("@mnt_io:matrix.org")) + .event_id(event_id) + .into_sync() + } + + #[test] + fn test_filter_no_duplicate() { + let event_id_0 = owned_event_id!("$ev0"); + let event_id_1 = owned_event_id!("$ev1"); + let event_id_2 = owned_event_id!("$ev2"); + + let event_0 = sync_timeline_event(&event_id_0); + let event_1 = sync_timeline_event(&event_id_1); + let event_2 = sync_timeline_event(&event_id_2); + + let deduplicator = Deduplicator::new(); + let existing_events = RoomEvents::new(); + + let mut events = + deduplicator.scan_and_learn([event_0, event_1, event_2].into_iter(), &existing_events); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_0)); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_1)); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_2)); + + assert!(events.next().is_none()); + } + + #[test] + fn test_filter_duplicates_in_new_events() { + let event_id_0 = owned_event_id!("$ev0"); + let event_id_1 = owned_event_id!("$ev1"); + + let event_0 = sync_timeline_event(&event_id_0); + let event_1 = sync_timeline_event(&event_id_1); + + let deduplicator = Deduplicator::new(); + let existing_events = RoomEvents::new(); + + let mut events = deduplicator.scan_and_learn( + [ + event_0.clone(), // OK + event_0, // Not OK + event_1, // OK + ] + .into_iter(), + &existing_events, + ); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_0.clone())); + + assert_let!(Some(Decoration::Duplicated(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_0)); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_1)); + + assert!(events.next().is_none()); + } + + #[test] + fn test_filter_duplicates_with_existing_events() { + let event_id_0 = owned_event_id!("$ev0"); + let event_id_1 = owned_event_id!("$ev1"); + let event_id_2 = owned_event_id!("$ev2"); + + let event_0 = sync_timeline_event(&event_id_0); + let event_1 = sync_timeline_event(&event_id_1); + let event_2 = sync_timeline_event(&event_id_2); + + let deduplicator = Deduplicator::new(); + let mut existing_events = RoomEvents::new(); + + // Simulate `event_1` is inserted inside `existing_events`. + { + let mut events = + deduplicator.scan_and_learn([event_1.clone()].into_iter(), &existing_events); + + assert_let!(Some(Decoration::Unique(event_1)) = events.next()); + assert_eq!(event_1.event_id(), Some(event_id_1.clone())); + + assert!(events.next().is_none()); + + drop(events); // make the borrow checker happy. + + // Now we can push `event_1` inside `existing_events`. + existing_events.push_events([event_1]); + } + + // `event_1` will be duplicated. 
+ { + let mut events = deduplicator.scan_and_learn( + [ + event_0, // OK + event_1, // Not OK + event_2, // Ok + ] + .into_iter(), + &existing_events, + ); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_0)); + + assert_let!(Some(Decoration::Duplicated(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_1)); + + assert_let!(Some(Decoration::Unique(event)) = events.next()); + assert_eq!(event.event_id(), Some(event_id_2)); + + assert!(events.next().is_none()); + } + } +} diff --git a/crates/matrix-sdk/src/event_cache/linked_chunk/mod.rs b/crates/matrix-sdk/src/event_cache/linked_chunk/mod.rs index 1bfda5f9df7..99fd02fe5bc 100644 --- a/crates/matrix-sdk/src/event_cache/linked_chunk/mod.rs +++ b/crates/matrix-sdk/src/event_cache/linked_chunk/mod.rs @@ -948,6 +948,15 @@ impl Position { pub fn index(&self) -> usize { self.1 } + + /// Decrement the index part (see [`Self::index`]), i.e. subtract 1. + /// + /// # Panic + /// + /// This method will panic if it will underflow, i.e. if the index is 0. + pub(super) fn decrement_index(&mut self) { + self.1 = self.1.checked_sub(1).expect("Cannot decrement the index because it's already 0"); + } } /// An iterator over a [`LinkedChunk`] that traverses the chunk in backward diff --git a/crates/matrix-sdk/src/event_cache/mod.rs b/crates/matrix-sdk/src/event_cache/mod.rs index ff23b2c4e81..7aa31485386 100644 --- a/crates/matrix-sdk/src/event_cache/mod.rs +++ b/crates/matrix-sdk/src/event_cache/mod.rs @@ -53,6 +53,7 @@ use tracing::{error, info_span, instrument, trace, warn, Instrument as _, Span}; use self::paginator::PaginatorError; use crate::{client::WeakClient, Client}; +mod deduplicator; mod linked_chunk; mod pagination; mod room; diff --git a/crates/matrix-sdk/src/event_cache/room/events.rs b/crates/matrix-sdk/src/event_cache/room/events.rs index f1b4eba7a0a..19d27b2e80d 100644 --- a/crates/matrix-sdk/src/event_cache/room/events.rs +++ b/crates/matrix-sdk/src/event_cache/room/events.rs @@ -12,9 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::cmp::Ordering; + use matrix_sdk_common::deserialized_responses::SyncTimelineEvent; +use ruma::OwnedEventId; +use tracing::{debug, error, warn}; -use super::super::linked_chunk::{Chunk, ChunkIdentifier, Error, Iter, LinkedChunk, Position}; +use super::super::{ + deduplicator::{Decoration, Deduplicator}, + linked_chunk::{Chunk, ChunkIdentifier, Error, Iter, LinkedChunk, Position}, +}; /// An alias for the real event type. pub(crate) type Event = SyncTimelineEvent; @@ -33,6 +40,9 @@ const DEFAULT_CHUNK_CAPACITY: usize = 128; pub struct RoomEvents { /// The real in-memory storage for all the events. chunks: LinkedChunk, + + /// The events deduplicator instance to help finding duplicates. + deduplicator: Deduplicator, } impl Default for RoomEvents { @@ -44,7 +54,7 @@ impl Default for RoomEvents { impl RoomEvents { /// Build a new [`RoomEvents`] struct with zero events. pub fn new() -> Self { - Self { chunks: LinkedChunk::new() } + Self { chunks: LinkedChunk::new(), deduplicator: Deduplicator::new() } } /// Clear all events. @@ -58,9 +68,18 @@ impl RoomEvents { pub fn push_events(&mut self, events: I) where I: IntoIterator, - I::IntoIter: ExactSizeIterator, { - self.chunks.push_items_back(events) + let (unique_events, duplicated_event_ids) = + self.filter_duplicated_events(events.into_iter()); + + // Remove the _old_ duplicated events! 
+ // + // We don't have to worry the removals can change the position of the existing + // events, because we are pushing all _new_ `events` at the back. + self.remove_events(duplicated_event_ids); + + // Push new `events`. + self.chunks.push_items_back(unique_events); } /// Push a gap after all events or gaps. @@ -69,12 +88,21 @@ impl RoomEvents { } /// Insert events at a specified position. - pub fn insert_events_at(&mut self, events: I, position: Position) -> Result<(), Error> + pub fn insert_events_at(&mut self, events: I, mut position: Position) -> Result<(), Error> where I: IntoIterator, - I::IntoIter: ExactSizeIterator, { - self.chunks.insert_items_at(events, position) + let (unique_events, duplicated_event_ids) = + self.filter_duplicated_events(events.into_iter()); + + // Remove the _old_ duplicated events! + // + // We **have to worry* the removals can change the position of the + // existing events. We **have** to update the `position` + // argument value for each removal. + self.remove_events_and_update_insert_position(duplicated_event_ids, &mut position); + + self.chunks.insert_items_at(unique_events, position) } /// Insert a gap at a specified position. @@ -96,9 +124,19 @@ impl RoomEvents { ) -> Result<&Chunk, Error> where I: IntoIterator, - I::IntoIter: ExactSizeIterator, { - self.chunks.replace_gap_at(events, gap_identifier) + let (unique_events, duplicated_event_ids) = + self.filter_duplicated_events(events.into_iter()); + + // Remove the _old_ duplicated events! + // + // We don't have to worry the removals can change the position of the existing + // events, because we are replacing a gap: its identifier will not change + // because of the removals. + self.remove_events(duplicated_event_ids); + + // Replace the gap by new events. + self.chunks.replace_gap_at(unique_events, gap_identifier) } /// Search for a chunk, and return its identifier. @@ -129,24 +167,182 @@ impl RoomEvents { pub fn events(&self) -> impl Iterator { self.chunks.items() } + + /// Deduplicate `events` considering all events in `Self::chunks`. + /// + /// The returned tuple contains (i) the unique events, and (ii) the + /// duplicated events (by ID). + fn filter_duplicated_events<'a, I>(&'a mut self, events: I) -> (Vec, Vec) + where + I: Iterator + 'a, + { + let mut duplicated_event_ids = Vec::new(); + + let deduplicated_events = self + .deduplicator + .scan_and_learn(events, self) + .filter_map(|decorated_event| match decorated_event { + Decoration::Unique(event) => Some(event), + Decoration::Duplicated(event) => { + debug!(event_id = ?event.event_id(), "Found a duplicated event"); + + duplicated_event_ids.push( + event + .event_id() + // SAFETY: An event with no ID is decorated as `Decoration::Invalid`. + // Thus, it's safe to unwrap the `Option` here. + .expect("The event has no ID"), + ); + + // Keep the new event! + Some(event) + } + Decoration::Invalid(event) => { + warn!(?event, "Found an event with no ID"); + + None + } + }) + .collect(); + + (deduplicated_events, duplicated_event_ids) + } +} + +// Private implementations, implementation specific. +impl RoomEvents { + /// Remove some events from `Self::chunks`. + /// + /// This method iterates over all event IDs in `event_ids` and removes the + /// associated event (if it exists) from `Self::chunks`. + /// + /// This is used to remove duplicated events, see + /// [`Self::filter_duplicated_events`]. 
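The removal helpers that follow keep an insertion `Position` valid while duplicates are deleted. The rule they implement is small enough to demonstrate on a plain `Vec`: a removal strictly before the tracked index (within the same chunk) shifts that index left by one, while removals at or after it leave it untouched. The sketch below is only an analogy with made-up data, not the `LinkedChunk` code itself.

```rust
// Analogy for the position bookkeeping: removing an element located before
// the tracked insertion index shifts that index left by one; removals at or
// after it leave the index alone (compare `Position::decrement_index`).
fn remove_and_update_index(items: &mut Vec<char>, to_remove: char, index: &mut usize) {
    if let Some(removal_index) = items.iter().position(|&item| item == to_remove) {
        items.remove(removal_index);
        if removal_index < *index {
            *index -= 1;
        }
    }
}

fn main() {
    let mut items = vec!['a', 'b', 'c'];
    let mut insert_index = 1; // we want to insert just before 'b'

    remove_and_update_index(&mut items, 'a', &mut insert_index);
    assert_eq!(items, ['b', 'c']);
    assert_eq!(insert_index, 0); // still points at 'b'

    remove_and_update_index(&mut items, 'c', &mut insert_index);
    assert_eq!(items, ['b']);
    assert_eq!(insert_index, 0); // unchanged: the removal happened after it
}
```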
+ fn remove_events(&mut self, event_ids: Vec) { + for event_id in event_ids { + let Some(event_position) = self.revents().find_map(|(position, event)| { + (event.event_id().as_ref() == Some(&event_id)).then_some(position) + }) else { + error!(?event_id, "Cannot find the event to remove"); + + continue; + }; + + self.chunks + .remove_item_at(event_position) + .expect("Failed to remove an event we have just found"); + } + } + + /// Remove all events from `Self::chunks` and update a fix [`Position`]. + /// + /// This method iterates over all event IDs in `event_ids` and removes the + /// associated event (if it exists) from `Self::chunks`, exactly like + /// [`Self::remove_events`]. The difference is that it will maintain a + /// [`Position`] according to the removals. This is useful for example if + /// one needs to insert events at a particular position, but it first + /// collects events that must be removed before the insertions (e.g. + /// duplicated events). One has to remove events, but also to maintain the + /// `Position` to its correct initial _target_. Let's see a practical + /// example: + /// + /// ```text + /// // Pseudo-code. + /// + /// let room_events = room_events(['a', 'b', 'c']); + /// let position = position_of('b' in room_events); + /// room_events.remove_events(['a']) + /// + /// // `position` no longer targets 'b', it now targets 'c', because all + /// // items have shifted to the left once. Instead, let's do: + /// + /// let room_events = room_events(['a', 'b', 'c']); + /// let position = position_of('b' in room_events); + /// room_events.remove_events_and_update_insert_position(['a'], &mut position) + /// + /// // `position` has been updated to still target 'b'. + /// ``` + /// + /// This is used to remove duplicated events, see + /// [`Self::filter_duplicated_events`]. + fn remove_events_and_update_insert_position( + &mut self, + event_ids: Vec, + position: &mut Position, + ) { + for event_id in event_ids { + let Some(event_position) = self.revents().find_map(|(position, event)| { + (event.event_id().as_ref() == Some(&event_id)).then_some(position) + }) else { + error!(?event_id, "Cannot find the event to remove"); + + continue; + }; + + self.chunks + .remove_item_at(event_position) + .expect("Failed to remove an event we have just found"); + + // A `Position` is composed of a `ChunkIdentifier` and an index. + // The `ChunkIdentifier` is stable, i.e. it won't change if an + // event is removed in another chunk. It means we only need to + // update `position` if the removal happened in **the same + // chunk**. + if event_position.chunk_identifier() == position.chunk_identifier() { + // Now we can compare the position indices. + match event_position.index().cmp(&position.index()) { + // `event_position`'s index < `position`'s index + Ordering::Less => { + // An event has been removed _before_ the new + // events: `position` needs to be shifted to the + // left by 1. + position.decrement_index(); + } + + // `event_position`'s index >= `position`'s index + Ordering::Equal | Ordering::Greater => { + // An event has been removed at the _same_ position of + // or _after_ the new events: `position` does _NOT_ need + // to be modified. + } + } + } + } + } } #[cfg(test)] mod tests { use assert_matches2::assert_let; - use matrix_sdk_test::{EventBuilder, ALICE}; - use ruma::{events::room::message::RoomMessageEventContent, EventId, OwnedEventId}; + use ruma::{user_id, EventId, OwnedEventId}; use super::*; + use crate::test_utils::events::EventFactory; + + macro_rules! 
assert_events_eq { + ( $events_iterator:expr, [ $( ( $event_id:ident at ( $chunk_identifier:literal, $index:literal ) ) ),* $(,)? ] ) => { + { + let mut events = $events_iterator; + + $( + assert_let!(Some((position, event)) = events.next()); + assert_eq!(position.chunk_identifier(), $chunk_identifier ); + assert_eq!(position.index(), $index ); + assert_eq!(event.event_id().unwrap(), $event_id ); + )* + + assert!(events.next().is_none(), "No more events are expected"); + } + }; + } - fn new_event(event_builder: &EventBuilder, event_id: &str) -> (OwnedEventId, Event) { + fn new_event(event_id: &str) -> (OwnedEventId, Event) { let event_id = EventId::parse(event_id).unwrap(); - - let event = SyncTimelineEvent::new(event_builder.make_sync_message_event_with_id( - *ALICE, - &event_id, - RoomMessageEventContent::text_plain("foo"), - )); + let event = EventFactory::new() + .text_msg("") + .sender(user_id!("@mnt_io:matrix.org")) + .event_id(&event_id) + .into_sync(); (event_id, event) } @@ -160,73 +356,59 @@ mod tests { #[test] fn test_push_events() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); - let (event_id_2, event_2) = new_event(&event_builder, "$ev2"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); let mut room_events = RoomEvents::new(); room_events.push_events([event_0, event_1]); room_events.push_events([event_2]); - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 1); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 2); - assert_eq!(event.event_id().unwrap(), event_id_2); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (0, 1)), + (event_id_2 at (0, 2)), + ] + ); } #[test] fn test_push_events_with_duplicates() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); let mut room_events = RoomEvents::new(); - room_events.push_events([event_0.clone()]); - room_events.push_events([event_0]); + room_events.push_events([event_0.clone(), event_1]); - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (0, 1)), + ] + ); - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 1); - assert_eq!(event.event_id().unwrap(), event_id_0); + // Everything is alright. Now let's push a duplicated event. 
+ room_events.push_events([event_0]); - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + // The first `event_id_0` has been removed. + (event_id_1 at (0, 0)), + (event_id_0 at (0, 1)), + ] + ); } #[test] fn test_push_gap() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); let mut room_events = RoomEvents::new(); @@ -234,21 +416,13 @@ mod tests { room_events.push_gap(Gap { prev_token: "hello".to_owned() }); room_events.push_events([event_1]); - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 2); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (2, 0)), + ] + ); { let mut chunks = room_events.chunks(); @@ -268,11 +442,9 @@ mod tests { #[test] fn test_insert_events_at() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); - let (event_id_2, event_2) = new_event(&event_builder, "$ev2"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); let mut room_events = RoomEvents::new(); @@ -287,75 +459,62 @@ mod tests { room_events.insert_events_at([event_2], position_of_event_1).unwrap(); - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 1); - assert_eq!(event.event_id().unwrap(), event_id_2); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 2); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_2 at (0, 1)), + (event_id_1 at (0, 2)), + ] + ); } #[test] - fn test_insert_events_at_with_dupicates() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); + fn test_insert_events_at_with_duplicates() { + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); + let (event_id_3, event_3) = new_event("$ev3"); let mut room_events = RoomEvents::new(); - room_events.push_events([event_0, event_1.clone()]); + room_events.push_events([event_0.clone(), event_1, event_2]); - let position_of_event_1 = room_events + let position_of_event_2 = room_events .events() .find_map(|(position, event)| { - (event.event_id().unwrap() == event_id_1).then_some(position) 
+ (event.event_id().unwrap() == event_id_2).then_some(position) }) .unwrap(); - room_events.insert_events_at([event_1], position_of_event_1).unwrap(); - - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 1); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 2); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (0, 1)), + (event_id_2 at (0, 2)), + ] + ); + + // Everything is alright. Now let's insert a duplicated events! + room_events.insert_events_at([event_0, event_3], position_of_event_2).unwrap(); + + assert_events_eq!( + room_events.events(), + [ + // The first `event_id_0` has been removed. + (event_id_1 at (0, 0)), + (event_id_0 at (0, 1)), + (event_id_3 at (0, 2)), + (event_id_2 at (0, 3)), + ] + ); } + #[test] fn test_insert_gap_at() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); let mut room_events = RoomEvents::new(); @@ -372,21 +531,13 @@ mod tests { .insert_gap_at(Gap { prev_token: "hello".to_owned() }, position_of_event_1) .unwrap(); - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 2); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (2, 0)), + ] + ); { let mut chunks = room_events.chunks(); @@ -406,11 +557,9 @@ mod tests { #[test] fn test_replace_gap_at() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); - let (event_id_2, event_2) = new_event(&event_builder, "$ev2"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); let mut room_events = RoomEvents::new(); @@ -425,26 +574,14 @@ mod tests { room_events.replace_gap_at([event_1, event_2], chunk_identifier_of_gap).unwrap(); - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 2); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 2); - 
assert_eq!(position.index(), 1); - assert_eq!(event.event_id().unwrap(), event_id_2); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (2, 0)), + (event_id_2 at (2, 1)), + ] + ); { let mut chunks = room_events.chunks(); @@ -461,14 +598,13 @@ mod tests { #[test] fn test_replace_gap_at_with_duplicates() { - let event_builder = EventBuilder::new(); - - let (event_id_0, event_0) = new_event(&event_builder, "$ev0"); - let (event_id_1, event_1) = new_event(&event_builder, "$ev1"); + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); let mut room_events = RoomEvents::new(); - room_events.push_events([event_0.clone()]); + room_events.push_events([event_0.clone(), event_1]); room_events.push_gap(Gap { prev_token: "hello".to_owned() }); let chunk_identifier_of_gap = room_events @@ -477,28 +613,26 @@ mod tests { .unwrap() .chunk_identifier(); - room_events.replace_gap_at([event_0, event_1], chunk_identifier_of_gap).unwrap(); - - { - let mut events = room_events.events(); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 0); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 2); - assert_eq!(position.index(), 0); - assert_eq!(event.event_id().unwrap(), event_id_0); - - assert_let!(Some((position, event)) = events.next()); - assert_eq!(position.chunk_identifier(), 2); - assert_eq!(position.index(), 1); - assert_eq!(event.event_id().unwrap(), event_id_1); - - assert!(events.next().is_none()); - } + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (0, 1)), + ] + ); + + // Everything is alright. Now let's replace a gap with a duplicated event. + room_events.replace_gap_at([event_0, event_2], chunk_identifier_of_gap).unwrap(); + + assert_events_eq!( + room_events.events(), + [ + // The first `event_id_0` has been removed. + (event_id_1 at (0, 0)), + (event_id_0 at (2, 0)), + (event_id_2 at (2, 1)), + ] + ); { let mut chunks = room_events.chunks(); @@ -512,4 +646,211 @@ mod tests { assert!(chunks.next().is_none()); } } + + #[test] + fn test_remove_events() { + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); + let (event_id_3, event_3) = new_event("$ev3"); + + // Push some events. + let mut room_events = RoomEvents::new(); + room_events.push_events([event_0, event_1, event_2, event_3]); + + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_1 at (0, 1)), + (event_id_2 at (0, 2)), + (event_id_3 at (0, 3)), + ] + ); + + // Remove some events. + room_events.remove_events(vec![event_id_1, event_id_3]); + + assert_events_eq!( + room_events.events(), + [ + (event_id_0 at (0, 0)), + (event_id_2 at (0, 1)), + ] + ); + } + + #[test] + fn test_remove_events_unknown_event() { + let (event_id_0, _event_0) = new_event("$ev0"); + + // Push ZERO event. + let mut room_events = RoomEvents::new(); + + assert_events_eq!(room_events.events(), []); + + // Remove one undefined event. + // No error is expected. 
+ room_events.remove_events(vec![event_id_0]); + + assert_events_eq!(room_events.events(), []); + + let mut events = room_events.events(); + assert!(events.next().is_none()); + } + + #[test] + fn test_remove_events_and_update_insert_position() { + let (event_id_0, event_0) = new_event("$ev0"); + let (event_id_1, event_1) = new_event("$ev1"); + let (event_id_2, event_2) = new_event("$ev2"); + let (event_id_3, event_3) = new_event("$ev3"); + let (event_id_4, event_4) = new_event("$ev4"); + let (event_id_5, event_5) = new_event("$ev5"); + let (event_id_6, event_6) = new_event("$ev6"); + let (event_id_7, event_7) = new_event("$ev7"); + let (event_id_8, event_8) = new_event("$ev8"); + + // Push some events. + let mut room_events = RoomEvents::new(); + room_events.push_events([event_0, event_1, event_2, event_3, event_4, event_5, event_6]); + room_events.push_gap(Gap { prev_token: "raclette".to_owned() }); + room_events.push_events([event_7, event_8]); + + fn position_of(room_events: &RoomEvents, event_id: &EventId) -> Position { + room_events + .events() + .find_map(|(position, event)| { + (event.event_id().unwrap() == event_id).then_some(position) + }) + .unwrap() + } + + // In the same chunk… + { + // Get the position of `event_4`. + let mut position = position_of(&room_events, &event_id_4); + + // Remove one event BEFORE `event_4`. + // + // The position must move to the left by 1. + { + let previous_position = position; + room_events + .remove_events_and_update_insert_position(vec![event_id_0], &mut position); + + assert_eq!(previous_position.chunk_identifier(), position.chunk_identifier()); + assert_eq!(previous_position.index() - 1, position.index()); + + // It still represents the position of `event_4`. + assert_eq!(position, position_of(&room_events, &event_id_4)); + } + + // Remove one event AFTER `event_4`. + // + // The position must not move. + { + let previous_position = position; + room_events + .remove_events_and_update_insert_position(vec![event_id_5], &mut position); + + assert_eq!(previous_position.chunk_identifier(), position.chunk_identifier()); + assert_eq!(previous_position.index(), position.index()); + + // It still represents the position of `event_4`. + assert_eq!(position, position_of(&room_events, &event_id_4)); + } + + // Remove one event: `event_4`. + // + // The position must not move. + { + let previous_position = position; + room_events + .remove_events_and_update_insert_position(vec![event_id_4], &mut position); + + assert_eq!(previous_position.chunk_identifier(), position.chunk_identifier()); + assert_eq!(previous_position.index(), position.index()); + } + + // Check the events. + assert_events_eq!( + room_events.events(), + [ + (event_id_1 at (0, 0)), + (event_id_2 at (0, 1)), + (event_id_3 at (0, 2)), + (event_id_6 at (0, 3)), + (event_id_7 at (2, 0)), + (event_id_8 at (2, 1)), + ] + ); + } + + // In another chunk… + { + // Get the position of `event_7`. + let mut position = position_of(&room_events, &event_id_7); + + // Remove one event BEFORE `event_7`. + // + // The position must not move because it happens in another chunk. + { + let previous_position = position; + room_events + .remove_events_and_update_insert_position(vec![event_id_1], &mut position); + + assert_eq!(previous_position.chunk_identifier(), position.chunk_identifier()); + assert_eq!(previous_position.index(), position.index()); + + // It still represents the position of `event_7`. + assert_eq!(position, position_of(&room_events, &event_id_7)); + } + + // Check the events. 
+ assert_events_eq!( + room_events.events(), + [ + (event_id_2 at (0, 0)), + (event_id_3 at (0, 1)), + (event_id_6 at (0, 2)), + (event_id_7 at (2, 0)), + (event_id_8 at (2, 1)), + ] + ); + } + + // In the same chunk, but remove multiple events, just for the fun and to ensure + // the loop works correctly. + { + // Get the position of `event_6`. + let mut position = position_of(&room_events, &event_id_6); + + // Remove three events BEFORE `event_6`. + // + // The position must move. + { + let previous_position = position; + room_events.remove_events_and_update_insert_position( + vec![event_id_2, event_id_3, event_id_7], + &mut position, + ); + + assert_eq!(previous_position.chunk_identifier(), position.chunk_identifier()); + assert_eq!(previous_position.index() - 2, position.index()); + + // It still represents the position of `event_6`. + assert_eq!(position, position_of(&room_events, &event_id_6)); + } + + // Check the events. + assert_events_eq!( + room_events.events(), + [ + (event_id_6 at (0, 0)), + (event_id_8 at (2, 0)), + ] + ); + } + } } diff --git a/crates/matrix-sdk/src/media.rs b/crates/matrix-sdk/src/media.rs index c4553dba731..a25466e0a98 100644 --- a/crates/matrix-sdk/src/media.rs +++ b/crates/matrix-sdk/src/media.rs @@ -613,7 +613,7 @@ impl Media { data: Vec, thumbnail: Option, send_progress: SharedObservable, - ) -> Result<(MediaSource, Option, Option>)> { + ) -> Result<(MediaSource, Option<(MediaSource, Box)>)> { let upload_thumbnail = self.upload_thumbnail(thumbnail, send_progress.clone()); let upload_attachment = async move { @@ -623,10 +623,9 @@ impl Media { .map_err(Error::from) }; - let ((thumbnail_source, thumbnail_info), response) = - try_join(upload_thumbnail, upload_attachment).await?; + let (thumbnail, response) = try_join(upload_thumbnail, upload_attachment).await?; - Ok((MediaSource::Plain(response.content_uri), thumbnail_source, thumbnail_info)) + Ok((MediaSource::Plain(response.content_uri), thumbnail)) } /// Uploads an unencrypted thumbnail to the media repository, and returns @@ -635,9 +634,9 @@ impl Media { &self, thumbnail: Option, send_progress: SharedObservable, - ) -> Result<(Option, Option>)> { + ) -> Result)>> { let Some(thumbnail) = thumbnail else { - return Ok((None, None)); + return Ok(None); }; let response = self @@ -654,6 +653,6 @@ impl Media { { mimetype: Some(thumbnail.content_type.as_ref().to_owned()) } ); - Ok((Some(MediaSource::Plain(url)), Some(Box::new(thumbnail_info)))) + Ok(Some((MediaSource::Plain(url), Box::new(thumbnail_info)))) } } diff --git a/crates/matrix-sdk/src/notification_settings/command.rs b/crates/matrix-sdk/src/notification_settings/command.rs index ed87c9bb5b2..f73ce30feb4 100644 --- a/crates/matrix-sdk/src/notification_settings/command.rs +++ b/crates/matrix-sdk/src/notification_settings/command.rs @@ -1,7 +1,6 @@ use std::fmt::Debug; use ruma::{ - api::client::push::RuleScope, push::{ Action, NewConditionalPushRule, NewPatternedPushRule, NewPushRule, NewSimplePushRule, PushCondition, RuleKind, Tweak, @@ -15,17 +14,17 @@ use crate::NotificationSettingsError; #[derive(Clone, Debug)] pub(crate) enum Command { /// Set a new `Room` push rule - SetRoomPushRule { scope: RuleScope, room_id: OwnedRoomId, notify: bool }, + SetRoomPushRule { room_id: OwnedRoomId, notify: bool }, /// Set a new `Override` push rule matching a `RoomId` - SetOverridePushRule { scope: RuleScope, rule_id: String, room_id: OwnedRoomId, notify: bool }, + SetOverridePushRule { rule_id: String, room_id: OwnedRoomId, notify: bool }, /// Set a new push 
rule for a keyword. - SetKeywordPushRule { scope: RuleScope, keyword: String }, + SetKeywordPushRule { keyword: String }, /// Set whether a push rule is enabled - SetPushRuleEnabled { scope: RuleScope, kind: RuleKind, rule_id: String, enabled: bool }, + SetPushRuleEnabled { kind: RuleKind, rule_id: String, enabled: bool }, /// Delete a push rule - DeletePushRule { scope: RuleScope, kind: RuleKind, rule_id: String }, + DeletePushRule { kind: RuleKind, rule_id: String }, /// Set a list of actions - SetPushRuleActions { scope: RuleScope, kind: RuleKind, rule_id: String, actions: Vec }, + SetPushRuleActions { kind: RuleKind, rule_id: String, actions: Vec }, } fn get_notify_actions(notify: bool) -> Vec { @@ -40,13 +39,13 @@ impl Command { /// Tries to create a push rule corresponding to this command pub(crate) fn to_push_rule(&self) -> Result { match self { - Self::SetRoomPushRule { scope: _, room_id, notify } => { + Self::SetRoomPushRule { room_id, notify } => { // `Room` push rule for this `room_id` let new_rule = NewSimplePushRule::new(room_id.clone(), get_notify_actions(*notify)); Ok(NewPushRule::Room(new_rule)) } - Self::SetOverridePushRule { scope: _, rule_id, room_id, notify } => { + Self::SetOverridePushRule { rule_id, room_id, notify } => { // `Override` push rule matching this `room_id` let new_rule = NewConditionalPushRule::new( rule_id.clone(), @@ -59,7 +58,7 @@ impl Command { Ok(NewPushRule::Override(new_rule)) } - Self::SetKeywordPushRule { scope: _, keyword } => { + Self::SetKeywordPushRule { keyword } => { // `Content` push rule matching this keyword let new_rule = NewPatternedPushRule::new( keyword.clone(), diff --git a/crates/matrix-sdk/src/notification_settings/mod.rs b/crates/matrix-sdk/src/notification_settings/mod.rs index a5c153400e1..58a89f2c03e 100644 --- a/crates/matrix-sdk/src/notification_settings/mod.rs +++ b/crates/matrix-sdk/src/notification_settings/mod.rs @@ -451,44 +451,39 @@ impl NotificationSettings { let request_config = Some(RequestConfig::short_retry()); for command in &rule_commands.commands { match command { - Command::DeletePushRule { scope, kind, rule_id } => { - let request = delete_pushrule::v3::Request::new( - scope.clone(), - kind.clone(), - rule_id.clone(), - ); + Command::DeletePushRule { kind, rule_id } => { + let request = delete_pushrule::v3::Request::new(kind.clone(), rule_id.clone()); self.client.send(request, request_config).await.map_err(|error| { error!("Unable to delete {kind} push rule `{rule_id}`: {error}"); NotificationSettingsError::UnableToRemovePushRule })?; } - Command::SetRoomPushRule { scope, room_id, notify: _ } => { + Command::SetRoomPushRule { room_id, notify: _ } => { let push_rule = command.to_push_rule()?; - let request = set_pushrule::v3::Request::new(scope.clone(), push_rule); + let request = set_pushrule::v3::Request::new(push_rule); self.client.send(request, request_config).await.map_err(|error| { error!("Unable to set room push rule `{room_id}`: {error}"); NotificationSettingsError::UnableToAddPushRule })?; } - Command::SetOverridePushRule { scope, rule_id, room_id: _, notify: _ } => { + Command::SetOverridePushRule { rule_id, room_id: _, notify: _ } => { let push_rule = command.to_push_rule()?; - let request = set_pushrule::v3::Request::new(scope.clone(), push_rule); + let request = set_pushrule::v3::Request::new(push_rule); self.client.send(request, request_config).await.map_err(|error| { error!("Unable to set override push rule `{rule_id}`: {error}"); NotificationSettingsError::UnableToAddPushRule })?; } - 
Command::SetKeywordPushRule { scope, keyword: _ } => { + Command::SetKeywordPushRule { keyword: _ } => { let push_rule = command.to_push_rule()?; - let request = set_pushrule::v3::Request::new(scope.clone(), push_rule); + let request = set_pushrule::v3::Request::new(push_rule); self.client .send(request, request_config) .await .map_err(|_| NotificationSettingsError::UnableToAddPushRule)?; } - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { let request = set_pushrule_enabled::v3::Request::new( - scope.clone(), kind.clone(), rule_id.clone(), *enabled, @@ -498,9 +493,8 @@ impl NotificationSettings { NotificationSettingsError::UnableToUpdatePushRule })?; } - Command::SetPushRuleActions { scope, kind, rule_id, actions } => { + Command::SetPushRuleActions { kind, rule_id, actions } => { let request = set_pushrule_actions::v3::Request::new( - scope.clone(), kind.clone(), rule_id.clone(), actions.clone(), diff --git a/crates/matrix-sdk/src/notification_settings/rule_commands.rs b/crates/matrix-sdk/src/notification_settings/rule_commands.rs index 3ac2cda73d9..47956c412b1 100644 --- a/crates/matrix-sdk/src/notification_settings/rule_commands.rs +++ b/crates/matrix-sdk/src/notification_settings/rule_commands.rs @@ -1,5 +1,4 @@ use ruma::{ - api::client::push::RuleScope, push::{ Action, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, RuleKind, Ruleset, @@ -31,13 +30,8 @@ impl RuleCommands { notify: bool, ) -> Result<(), NotificationSettingsError> { let command = match kind { - RuleKind::Room => Command::SetRoomPushRule { - scope: RuleScope::Global, - room_id: room_id.to_owned(), - notify, - }, + RuleKind::Room => Command::SetRoomPushRule { room_id: room_id.to_owned(), notify }, RuleKind::Override => Command::SetOverridePushRule { - scope: RuleScope::Global, rule_id: room_id.to_string(), room_id: room_id.to_owned(), notify, @@ -60,7 +54,7 @@ impl RuleCommands { &mut self, keyword: String, ) -> Result<(), NotificationSettingsError> { - let command = Command::SetKeywordPushRule { scope: RuleScope::Global, keyword }; + let command = Command::SetKeywordPushRule { keyword }; self.rules.insert(command.to_push_rule()?, None, None)?; self.commands.push(command); @@ -75,7 +69,7 @@ impl RuleCommands { rule_id: String, ) -> Result<(), RemovePushRuleError> { self.rules.remove(kind.clone(), &rule_id)?; - self.commands.push(Command::DeletePushRule { scope: RuleScope::Global, kind, rule_id }); + self.commands.push(Command::DeletePushRule { kind, rule_id }); Ok(()) } @@ -90,7 +84,6 @@ impl RuleCommands { .set_enabled(kind.clone(), rule_id, enabled) .map_err(|_| NotificationSettingsError::RuleNotFound(rule_id.to_owned()))?; self.commands.push(Command::SetPushRuleEnabled { - scope: RuleScope::Global, kind, rule_id: rule_id.to_owned(), enabled, @@ -182,7 +175,6 @@ impl RuleCommands { .set_actions(kind.clone(), rule_id, actions.clone()) .map_err(|_| NotificationSettingsError::RuleNotFound(rule_id.to_owned()))?; self.commands.push(Command::SetPushRuleActions { - scope: RuleScope::Global, kind, rule_id: rule_id.to_owned(), actions, @@ -196,7 +188,6 @@ mod tests { use assert_matches::assert_matches; use matrix_sdk_test::async_test; use ruma::{ - api::client::push::RuleScope, push::{ Action, NewPushRule, NewSimplePushRule, PredefinedContentRuleId, PredefinedOverrideRuleId, PredefinedUnderrideRuleId, RemovePushRuleError, RuleKind, @@ -229,8 +220,7 @@ mod tests { // Exactly one command must have been created. 
assert_eq!(rule_commands.commands.len(), 1); assert_matches!(&rule_commands.commands[0], - Command::SetRoomPushRule { scope, room_id: command_room_id, notify } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetRoomPushRule { room_id: command_room_id, notify } => { assert_eq!(command_room_id, &room_id); assert!(notify); } @@ -249,8 +239,7 @@ mod tests { // Exactly one command must have been created. assert_eq!(rule_commands.commands.len(), 1); assert_matches!(&rule_commands.commands[0], - Command::SetOverridePushRule { scope, room_id: command_room_id, rule_id, notify } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetOverridePushRule {room_id: command_room_id, rule_id, notify } => { assert_eq!(command_room_id, &room_id); assert_eq!(rule_id, room_id.as_str()); assert!(notify); @@ -298,8 +287,7 @@ mod tests { // Exactly one command must have been created. assert_eq!(rule_commands.commands.len(), 1); assert_matches!(&rule_commands.commands[0], - Command::DeletePushRule { scope, kind, rule_id } => { - assert_eq!(scope, &RuleScope::Global); + Command::DeletePushRule { kind, rule_id } => { assert_eq!(kind, &RuleKind::Room); assert_eq!(rule_id, room_id.as_str()); } @@ -351,8 +339,7 @@ mod tests { // Exactly one command must have been created. assert_eq!(rule_commands.commands.len(), 1); assert_matches!(&rule_commands.commands[0], - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { assert_eq!(kind, &RuleKind::Override); assert_eq!(rule_id, PredefinedOverrideRuleId::Reaction.as_str()); assert!(enabled); @@ -426,8 +413,7 @@ mod tests { assert_eq!(rule_commands.commands.len(), 3); assert_matches!(&rule_commands.commands[0], - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { assert_eq!(kind, &RuleKind::Override); assert_eq!(rule_id, PredefinedOverrideRuleId::IsUserMention.as_str()); assert!(enabled); @@ -437,8 +423,7 @@ mod tests { #[allow(deprecated)] { assert_matches!(&rule_commands.commands[1], - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { assert_eq!(kind, &RuleKind::Content); assert_eq!(rule_id, PredefinedContentRuleId::ContainsUserName.as_str()); assert!(enabled); @@ -446,8 +431,7 @@ mod tests { ); assert_matches!(&rule_commands.commands[2], - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { assert_eq!(kind, &RuleKind::Override); assert_eq!(rule_id, PredefinedOverrideRuleId::ContainsDisplayName.as_str()); assert!(enabled); @@ -499,8 +483,7 @@ mod tests { assert_eq!(rule_commands.commands.len(), 2); assert_matches!(&rule_commands.commands[0], - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { assert_eq!(kind, &RuleKind::Override); assert_eq!(rule_id, PredefinedOverrideRuleId::IsRoomMention.as_str()); assert!(enabled); @@ -510,8 +493,7 @@ mod tests { #[allow(deprecated)] { assert_matches!(&rule_commands.commands[1], - Command::SetPushRuleEnabled { scope, kind, rule_id, enabled } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleEnabled { kind, rule_id, enabled } 
=> { assert_eq!(kind, &RuleKind::Override); assert_eq!(rule_id, PredefinedOverrideRuleId::RoomNotif.as_str()); assert!(enabled); @@ -550,8 +532,7 @@ mod tests { // and a `SetPushRuleActions` command must have been added assert_eq!(rule_commands.commands.len(), 1); assert_matches!(&rule_commands.commands[0], - Command::SetPushRuleActions { scope, kind, rule_id, actions } => { - assert_eq!(scope, &RuleScope::Global); + Command::SetPushRuleActions { kind, rule_id, actions } => { assert_eq!(kind, &RuleKind::Underride); assert_eq!(rule_id, PredefinedUnderrideRuleId::Message.as_str()); assert_eq!(actions.len(), 2); diff --git a/crates/matrix-sdk/src/notification_settings/rules.rs b/crates/matrix-sdk/src/notification_settings/rules.rs index 9414852585b..353af7b837d 100644 --- a/crates/matrix-sdk/src/notification_settings/rules.rs +++ b/crates/matrix-sdk/src/notification_settings/rules.rs @@ -253,7 +253,7 @@ impl Rules { pub(crate) fn apply(&mut self, commands: RuleCommands) { for command in commands.commands { match command { - Command::DeletePushRule { scope: _, kind, rule_id } => { + Command::DeletePushRule { kind, rule_id } => { _ = self.ruleset.remove(kind, rule_id); } Command::SetRoomPushRule { .. } @@ -263,10 +263,10 @@ impl Rules { _ = self.ruleset.insert(push_rule, None, None); } } - Command::SetPushRuleEnabled { scope: _, kind, rule_id, enabled } => { + Command::SetPushRuleEnabled { kind, rule_id, enabled } => { _ = self.ruleset.set_enabled(kind, rule_id, enabled); } - Command::SetPushRuleActions { scope: _, kind, rule_id, actions } => { + Command::SetPushRuleActions { kind, rule_id, actions } => { _ = self.ruleset.set_actions(kind, rule_id, actions); } } diff --git a/crates/matrix-sdk/src/room/mod.rs b/crates/matrix-sdk/src/room/mod.rs index 24b6e3177eb..b841f35c7d4 100644 --- a/crates/matrix-sdk/src/room/mod.rs +++ b/crates/matrix-sdk/src/room/mod.rs @@ -30,6 +30,7 @@ use futures_util::{ future::{try_join, try_join_all}, stream::FuturesUnordered, }; +use http::StatusCode; #[cfg(all(feature = "e2e-encryption", not(target_arch = "wasm32")))] pub use identity_status_changes::IdentityStatusChanges; #[cfg(feature = "e2e-encryption")] @@ -82,7 +83,7 @@ use ruma::{ beacon_info::BeaconInfoEventContent, call::notify::{ApplicationType, CallNotifyEventContent, NotifyType}, direct::DirectEventContent, - marked_unread::MarkedUnreadEventContent, + marked_unread::{MarkedUnreadEventContent, UnstableMarkedUnreadEventContent}, receipt::{Receipt, ReceiptThread, ReceiptType}, room::{ avatar::{self, RoomAvatarEventContent}, @@ -90,11 +91,12 @@ use ruma::{ history_visibility::HistoryVisibility, message::{ AudioInfo, AudioMessageEventContent, FileInfo, FileMessageEventContent, - ImageMessageEventContent, MessageType, RoomMessageEventContent, + FormattedBody, ImageMessageEventContent, MessageType, RoomMessageEventContent, UnstableAudioDetailsContentBlock, UnstableVoiceContentBlock, VideoInfo, VideoMessageEventContent, }, name::RoomNameEventContent, + pinned_events::RoomPinnedEventsEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, server_acl::RoomServerAclEventContent, topic::RoomTopicEventContent, @@ -1965,7 +1967,7 @@ impl Room { }; #[cfg(feature = "e2e-encryption")] - let (media_source, thumbnail_source, thumbnail_info) = if self.is_encrypted().await? { + let (media_source, thumbnail) = if self.is_encrypted().await? { self.client .upload_encrypted_media_and_thumbnail(content_type, &data, thumbnail, send_progress) .await? 
@@ -1984,7 +1986,7 @@ impl Room { }; #[cfg(not(feature = "e2e-encryption"))] - let (media_source, thumbnail_source, thumbnail_info) = self + let (media_source, thumbnail) = self .client .media() .upload_plain_media_and_thumbnail(content_type, data.clone(), thumbnail, send_progress) @@ -2003,7 +2005,7 @@ impl Room { } if let Some(((data, height, width), source)) = - thumbnail_cache_info.zip(thumbnail_source.as_ref()) + thumbnail_cache_info.zip(thumbnail.as_ref().map(|tuple| &tuple.0)) { debug!("caching the thumbnail"); @@ -2027,21 +2029,19 @@ impl Room { } } - let msg_type = self.make_attachment_message( - content_type, - media_source, - thumbnail_source, - thumbnail_info, - filename, - config, + let content = Self::make_attachment_event( + self.make_attachment_type( + content_type, + filename, + media_source, + config.caption, + config.formatted_caption, + config.info, + thumbnail, + ), + mentions, ); - let mut content = RoomMessageEventContent::new(msg_type); - - if let Some(mentions) = mentions { - content = content.add_mentions(mentions); - } - let mut fut = self.send(content); if let Some(txn_id) = txn_id { fut = fut.with_transaction_id(txn_id); @@ -2051,33 +2051,37 @@ impl Room { /// Creates the inner [`MessageType`] for an already-uploaded media file /// provided by its source. - fn make_attachment_message( + #[allow(clippy::too_many_arguments)] + fn make_attachment_type( &self, content_type: &Mime, - source: MediaSource, - thumbnail_source: Option, - thumbnail_info: Option>, filename: &str, - config: AttachmentConfig, + source: MediaSource, + caption: Option, + formatted_caption: Option, + info: Option, + thumbnail: Option<(MediaSource, Box)>, ) -> MessageType { - // if config.caption is set, use it as body, and filename as the file name - // otherwise, body is the filename, and the filename is not set. + // If caption is set, use it as body, and filename as the file name; otherwise, + // body is the filename, and the filename is not set. 
// https://github.com/tulir/matrix-spec-proposals/blob/body-as-caption/proposals/2530-body-as-caption.md - let (body, filename) = match config.caption { + let (body, filename) = match caption { Some(caption) => (caption, Some(filename.to_owned())), None => (filename.to_owned(), None), }; + let (thumbnail_source, thumbnail_info) = thumbnail.unzip(); + match content_type.type_() { mime::IMAGE => { - let info = assign!(config.info.map(ImageInfo::from).unwrap_or_default(), { + let info = assign!(info.map(ImageInfo::from).unwrap_or_default(), { mimetype: Some(content_type.as_ref().to_owned()), thumbnail_source, thumbnail_info }); let content = assign!(ImageMessageEventContent::new(body, source), { info: Some(Box::new(info)), - formatted: config.formatted_caption, + formatted: formatted_caption, filename }); MessageType::Image(content) @@ -2087,7 +2091,7 @@ impl Room { let mut content = AudioMessageEventContent::new(body, source); if let Some(AttachmentInfo::Voice { audio_info, waveform: Some(waveform_vec) }) = - &config.info + &info { if let Some(duration) = audio_info.duration { let waveform = waveform_vec.iter().map(|v| (*v).into()).collect(); @@ -2097,7 +2101,7 @@ impl Room { content.voice = Some(UnstableVoiceContentBlock::new()); } - let mut audio_info = config.info.map(AudioInfo::from).unwrap_or_default(); + let mut audio_info = info.map(AudioInfo::from).unwrap_or_default(); audio_info.mimetype = Some(content_type.as_ref().to_owned()); let content = content.info(Box::new(audio_info)); @@ -2105,21 +2109,21 @@ impl Room { } mime::VIDEO => { - let info = assign!(config.info.map(VideoInfo::from).unwrap_or_default(), { + let info = assign!(info.map(VideoInfo::from).unwrap_or_default(), { mimetype: Some(content_type.as_ref().to_owned()), thumbnail_source, thumbnail_info }); let content = assign!(VideoMessageEventContent::new(body, source), { info: Some(Box::new(info)), - formatted: config.formatted_caption, + formatted: formatted_caption, filename }); MessageType::Video(content) } _ => { - let info = assign!(config.info.map(FileInfo::from).unwrap_or_default(), { + let info = assign!(info.map(FileInfo::from).unwrap_or_default(), { mimetype: Some(content_type.as_ref().to_owned()), thumbnail_source, thumbnail_info @@ -2132,6 +2136,19 @@ impl Room { } } + /// Creates the [`RoomMessageEventContent`] based on the message type and + /// mentions. + fn make_attachment_event( + msg_type: MessageType, + mentions: Option, + ) -> RoomMessageEventContent { + let mut content = RoomMessageEventContent::new(msg_type); + if let Some(mentions) = mentions { + content = content.add_mentions(mentions); + } + content + } + /// Update the power levels of a select set of users of this room. /// /// Issue a `power_levels` state event request to the server, changing the @@ -2936,7 +2953,7 @@ impl Room { pub async fn set_unread_flag(&self, unread: bool) -> Result<()> { let user_id = self.client.user_id().ok_or(Error::AuthenticationRequired)?; - let content = MarkedUnreadEventContent::new(unread); + let content = UnstableMarkedUnreadEventContent::from(MarkedUnreadEventContent::new(unread)); let request = set_room_account_data::v3::Request::new( user_id.to_owned(), @@ -3181,6 +3198,31 @@ impl Room { }); (handle, receiver) + + /// Load pinned state events for a room from the `/state` endpoint in the + /// home server. 
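For context on how a caller is expected to consume the helper documented here (and defined in the hunk that follows): a missing `m.room.pinned_events` state event comes back from `/state` as a 404 and is surfaced as `Ok(None)`, which is distinct from an existing-but-empty list. The sketch below is illustrative only; `print_pinned` is a hypothetical helper, and the `Result<Option<Vec<OwnedEventId>>>` shape is inferred from the hunk.

```rust
// Hedged sketch of calling the new helper; `print_pinned` is hypothetical.
use matrix_sdk::ruma::OwnedEventId;
use matrix_sdk::Room;

async fn print_pinned(room: &Room) -> matrix_sdk::Result<()> {
    let pinned: Option<Vec<OwnedEventId>> = room.load_pinned_events().await?;

    match pinned {
        Some(ids) if !ids.is_empty() => println!("{} pinned event(s): {ids:?}", ids.len()),
        Some(_) => println!("pinned-events state exists but is empty"),
        // The 404 case: the state event was never set for this room.
        None => println!("no m.room.pinned_events state in this room"),
    }
    Ok(())
}
```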
+ pub async fn load_pinned_events(&self) -> Result>> { + let response = self + .client + .send( + get_state_events_for_key::v3::Request::new( + self.room_id().to_owned(), + StateEventType::RoomPinnedEvents, + "".to_owned(), + ), + None, + ) + .await; + + match response { + Ok(response) => { + Ok(Some(response.content.deserialize_as::()?.pinned)) + } + Err(http_error) => match http_error.as_client_api_error() { + Some(error) if error.status_code == StatusCode::NOT_FOUND => Ok(None), + _ => Err(http_error.into()), + }, + } } } diff --git a/crates/matrix-sdk/src/room_directory_search.rs b/crates/matrix-sdk/src/room_directory_search.rs index 3ac5893b1bd..8a4746d4dd8 100644 --- a/crates/matrix-sdk/src/room_directory_search.rs +++ b/crates/matrix-sdk/src/room_directory_search.rs @@ -24,7 +24,7 @@ use ruma::{ OwnedMxcUri, OwnedRoomAliasId, OwnedRoomId, }; -use crate::{Client, Result}; +use crate::{Client, OwnedServerName, Result}; /// This struct represents a single result of a room directory search. /// @@ -108,7 +108,7 @@ impl SearchState { /// let homeserver = Url::parse("http://localhost:8080")?; /// let client = Client::new(homeserver).await?; /// let mut room_directory_search = RoomDirectorySearch::new(client); -/// room_directory_search.search(None, 10).await?; +/// room_directory_search.search(None, 10, None).await?; /// let (results, mut stream) = room_directory_search.results(); /// room_directory_search.next_page().await?; /// anyhow::Ok(()) @@ -118,6 +118,7 @@ impl SearchState { pub struct RoomDirectorySearch { batch_size: u32, filter: Option, + server: Option, search_state: SearchState, client: Client, results: ObservableVector, @@ -129,6 +130,7 @@ impl RoomDirectorySearch { Self { batch_size: 0, filter: None, + server: None, search_state: Default::default(), client, results: ObservableVector::new(), @@ -138,17 +140,26 @@ impl RoomDirectorySearch { /// Starts a filtered search for the server. /// /// If the `filter` is not provided it will search for all the rooms. - /// You can specify a `batch_size`` to control the number of rooms to fetch + /// You can specify a `batch_size` to control the number of rooms to fetch /// per request. /// + /// If the `via_server` is not provided it will search in the current + /// homeserver by default. + /// /// This method will clear the current search results and start a new one. // Should never be used concurrently with another `next_page` or a // `search`. - pub async fn search(&mut self, filter: Option, batch_size: u32) -> Result<()> { + pub async fn search( + &mut self, + filter: Option, + batch_size: u32, + via_server: Option, + ) -> Result<()> { self.filter = filter; self.batch_size = batch_size; self.search_state = Default::default(); self.results.clear(); + self.server = via_server; self.next_page().await } @@ -165,6 +176,7 @@ impl RoomDirectorySearch { let mut request = PublicRoomsFilterRequest::new(); request.filter = filter; + request.server = self.server.clone(); request.limit = Some(self.batch_size.into()); request.since = self.search_state.next_token().map(ToOwned::to_owned); @@ -196,7 +208,7 @@ impl RoomDirectorySearch { (self.results.len() as f64 / self.batch_size as f64).ceil() as usize } - /// Get whether the search is at the last page + /// Get whether the search is at the last page. 
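Because `search` now takes a third `via_server` argument, a full paginated search against another server's public directory looks like the sketch below. It is a hedged example: the homeserver URL and `some.server.org` are placeholders borrowed from the tests, and `run` is a hypothetical wrapper.

```rust
// Hedged sketch of the three-argument `search` call with pagination; the
// URL and server name are placeholders and `run` is a hypothetical wrapper.
use matrix_sdk::ruma::owned_server_name;
use matrix_sdk::{room_directory_search::RoomDirectorySearch, Client};
use url::Url;

async fn run() -> anyhow::Result<()> {
    let client = Client::new(Url::parse("http://localhost:8080")?).await?;
    let mut search = RoomDirectorySearch::new(client);

    // Query another homeserver's public directory, 10 rooms per page;
    // passing `None` instead searches the user's own homeserver.
    search.search(None, 10, Some(owned_server_name!("some.server.org"))).await?;

    let (first_page, _updates) = search.results();
    println!("got {} room(s) on the first page", first_page.len());

    if !search.is_at_last_page() {
        search.next_page().await?;
    }
    Ok(())
}
```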
pub fn is_at_last_page(&self) -> bool { self.search_state.is_at_end() } @@ -208,7 +220,7 @@ mod tests { use eyeball_im::VectorDiff; use futures_util::StreamExt; use matrix_sdk_test::{async_test, test_json}; - use ruma::{directory::Filter, serde::Raw, RoomAliasId, RoomId}; + use ruma::{directory::Filter, owned_server_name, serde::Raw, RoomAliasId, RoomId}; use serde_json::Value as JsonValue; use stream_assert::assert_pending; use wiremock::{ @@ -302,7 +314,8 @@ mod tests { .mount(&server) .await; - room_directory_search.search(None, 1).await.unwrap(); + let via_server = owned_server_name!("some.server.org"); + room_directory_search.search(None, 1, Some(via_server)).await.unwrap(); let (results, mut stream) = room_directory_search.results(); assert_pending!(stream); assert_eq!(results.len(), 1); @@ -321,7 +334,7 @@ mod tests { .mount(&server) .await; - room_directory_search.search(None, 1).await.unwrap(); + room_directory_search.search(None, 1, None).await.unwrap(); let (initial_results, mut stream) = room_directory_search.results(); assert_eq!(initial_results, vec![get_first_page_description()].into()); assert!(!room_directory_search.is_at_last_page()); @@ -376,7 +389,7 @@ mod tests { .mount(&server) .await; - room_directory_search.search(None, 1).await.unwrap(); + room_directory_search.search(None, 1, None).await.unwrap(); let (results, mut stream) = room_directory_search.results(); assert_eq!(results, vec![get_first_page_description()].into()); @@ -414,7 +427,7 @@ mod tests { .mount(&server) .await; - room_directory_search.search(Some("bleecker.street".into()), 1).await.unwrap(); + room_directory_search.search(Some("bleecker.street".into()), 1, None).await.unwrap(); let (initial_results, mut stream) = room_directory_search.results(); assert_eq!(initial_results, vec![get_first_page_description()].into()); assert!(!room_directory_search.is_at_last_page()); @@ -450,7 +463,7 @@ mod tests { .mount(&server) .await; - room_directory_search.search(None, 1).await.unwrap(); + room_directory_search.search(None, 1, None).await.unwrap(); let (initial_results, mut stream) = room_directory_search.results(); assert_eq!(initial_results, vec![get_first_page_description()].into()); assert!(!room_directory_search.is_at_last_page()); @@ -465,7 +478,7 @@ mod tests { .mount(&server) .await; - room_directory_search.search(Some("bleecker.street".into()), 1).await.unwrap(); + room_directory_search.search(Some("bleecker.street".into()), 1, None).await.unwrap(); let results_batch: Vec> = stream.next().await.unwrap(); assert_matches!(&results_batch[0], VectorDiff::Clear); diff --git a/crates/matrix-sdk/src/send_queue.rs b/crates/matrix-sdk/src/send_queue.rs index 9e0b14aeceb..6a330844f0d 100644 --- a/crates/matrix-sdk/src/send_queue.rs +++ b/crates/matrix-sdk/src/send_queue.rs @@ -38,7 +38,7 @@ //! - enable/disable them all at once with [`SendQueue::set_enabled()`]. //! - get notifications about send errors with [`SendQueue::subscribe_errors`]. //! - reload all unsent events that had been persisted in storage using -//! [`SendQueue::respawn_tasks_for_rooms_with_unsent_events()`]. It is +//! [`SendQueue::respawn_tasks_for_rooms_with_unsent_requests()`]. It is //! recommended to call this method during initialization of a client, //! otherwise persisted unsent events will only be re-sent after the send //! queue for the given room has been reopened for the first time. 
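As the renamed method's documentation above recommends, a client should trigger this once during startup so that requests persisted by a previous run actually get sent. A minimal sketch follows; the `resume_sending` wrapper is hypothetical and would typically run right after a session is restored.

```rust
// Hedged sketch of the recommended startup call; `resume_sending` is a
// hypothetical helper.
async fn resume_sending(client: &matrix_sdk::Client) {
    let send_queue = client.send_queue();

    if send_queue.is_enabled() {
        // Respawn the per-room background tasks for every room that still
        // has unsent requests persisted in the state store.
        send_queue.respawn_tasks_for_rooms_with_unsent_requests().await;
    }
}
```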
@@ -53,8 +53,8 @@ use std::{ use matrix_sdk_base::{ store::{ - ChildTransactionId, DependentQueuedEvent, DependentQueuedEventKind, QueueWedgeError, - QueuedEvent, SerializableEventContent, + ChildTransactionId, DependentQueuedRequest, DependentQueuedRequestKind, QueueWedgeError, + QueuedRequest, QueuedRequestKind, SentRequestKey, SerializableEventContent, }, RoomState, StoreError, }; @@ -65,7 +65,7 @@ use ruma::{ EventContent as _, }, serde::Raw, - EventId, OwnedEventId, OwnedRoomId, OwnedTransactionId, TransactionId, + OwnedEventId, OwnedRoomId, OwnedTransactionId, TransactionId, }; use tokio::sync::{broadcast, Notify, RwLock}; use tracing::{debug, error, info, instrument, trace, warn}; @@ -97,16 +97,16 @@ impl SendQueue { Self { client } } - /// Reload all the rooms which had unsent events, and respawn tasks for + /// Reload all the rooms which had unsent requests, and respawn tasks for /// those rooms. - pub async fn respawn_tasks_for_rooms_with_unsent_events(&self) { + pub async fn respawn_tasks_for_rooms_with_unsent_requests(&self) { if !self.is_enabled() { return; } let room_ids = - self.client.store().load_rooms_with_unsent_events().await.unwrap_or_else(|err| { - warn!("error when loading rooms with unsent events: {err}"); + self.client.store().load_rooms_with_unsent_requests().await.unwrap_or_else(|err| { - warn!("error when loading rooms with unsent requests: {err}"); + warn!("error when loading rooms with unsent requests: {err}"); Vec::new() }); @@ -118,11 +118,14 @@ impl SendQueue { } } + /// Tiny helper to get the send queue's global context from the [`Client`]. #[inline(always)] fn data(&self) -> &SendQueueData { &self.client.inner.send_queue_data } + /// Get or create a new send queue for a given room, and insert it into our + /// memoized rooms mapping. fn for_room(&self, room: Room) -> RoomSendQueue { let data = self.data(); @@ -141,7 +144,9 @@ impl SendQueue { &self.client, owned_room_id.clone(), ); + map.insert(owned_room_id, room_q.clone()); + room_q } @@ -150,9 +155,9 @@ impl SendQueue { /// If we're disabling the queue, and requests were being sent, they're not /// aborted, and will continue until a status resolves (error responses /// will keep the events in the buffer of events to send later). The - /// disablement will happen before the next event is sent. + /// disablement will happen before the next request is sent. /// - /// This may wake up background tasks and resume sending of events in the + /// This may wake up background tasks and resume sending of requests in the /// background. pub async fn set_enabled(&self, enabled: bool) { debug!(?enabled, "setting global send queue enablement"); @@ -165,8 +170,8 @@ impl SendQueue { } // Reload some extra rooms that might not have been awaken yet, but could have - // events from previous sessions. - self.respawn_tasks_for_rooms_with_unsent_events().await; + // requests from previous sessions. + self.respawn_tasks_for_rooms_with_unsent_requests().await; } /// Returns whether the send queue is enabled, at a client-wide @@ -176,32 +181,32 @@ impl SendQueue { } /// A subscriber to the enablement status (enabled or disabled) of the - /// send queue. + /// send queue, along with useful errors. pub fn subscribe_errors(&self) -> broadcast::Receiver<SendQueueRoomError> { self.data().error_reporter.subscribe() } } -/// A specific room ran into an error, and has disabled itself. +/// A specific room's send queue ran into an error, and it has disabled itself. #[derive(Clone, Debug)] pub struct SendQueueRoomError { - /// Which room is failing? + /// For which room is the send queue failing?
pub room_id: OwnedRoomId, - /// The error the room has ran into, when trying to send an event. + /// The error the room has ran into, when trying to send a request. pub error: Arc<crate::Error>, /// Whether the error is considered recoverable or not. /// /// An error that's recoverable will disable the room's send queue, while an - /// unrecoverable error will be parked, until the user decides to cancel - /// sending it. + /// unrecoverable error will be parked, until the user decides to do + /// something about it. pub is_recoverable: bool, } impl Client { /// Returns a [`SendQueue`] that handles sending, retrying and not - /// forgetting about messages that are to be sent. + /// forgetting about requests that are to be sent. pub fn send_queue(&self) -> SendQueue { SendQueue::new(self.clone()) } @@ -321,10 +326,11 @@ impl RoomSendQueue { /// the [`Self::subscribe()`] method to get updates about the sending of /// that event. /// - /// By default, if sending the event fails on the first attempt, it will be - /// retried a few times. If sending failed, the entire client's sending - /// queue will be disabled, and it will need to be manually re-enabled - /// by the caller. + /// By default, if sending failed on the first attempt, it will be retried a + /// few times. If sending failed after those retries, the entire + /// client's sending queue will be disabled, and it will need to be + /// manually re-enabled by the caller (e.g. after network is back, or when + /// something has been done about the faulty requests). pub async fn send_raw( &self, content: Raw<AnyMessageLikeEventContent>, @@ -339,7 +345,7 @@ impl RoomSendQueue { let content = SerializableEventContent::from_raw(content, event_type); - let transaction_id = self.inner.queue.push(content.clone()).await?; + let transaction_id = self.inner.queue.push(content.clone().into()).await?; trace!(%transaction_id, "manager sends a raw event to the background task"); self.inner.notifier.notify_one(); @@ -368,10 +374,11 @@ impl RoomSendQueue { /// the [`Self::subscribe()`] method to get updates about the sending of /// that event. /// - /// By default, if sending the event fails on the first attempt, it will be - /// retried a few times. If sending failed, the entire client's sending - /// queue will be disabled, and it will need to be manually re-enabled - /// by the caller. + /// By default, if sending failed on the first attempt, it will be retried a + /// few times. If sending failed after those retries, the entire + /// client's sending queue will be disabled, and it will need to be + /// manually re-enabled by the caller (e.g. after network is back, or when + /// something has been done about the faulty requests). pub async fn send( &self, content: AnyMessageLikeEventContent, @@ -383,8 +390,8 @@ impl RoomSendQueue { .await } - /// Returns the current local events as well as a receiver to listen to the - /// send queue updates, as defined in [`RoomSendQueueUpdate`]. + /// Returns the current local requests as well as a receiver to listen to + /// the send queue updates, as defined in [`RoomSendQueueUpdate`]. pub async fn subscribe( &self, ) -> Result<(Vec<LocalEcho>, broadcast::Receiver<RoomSendQueueUpdate>), RoomSendQueueError> @@ -394,6 +401,11 @@ impl RoomSendQueue { Ok((local_echoes, self.inner.updates.subscribe())) } + /// A task that must be spawned in the async runtime, running in the + /// background for each room that has a send queue. + /// + /// It only progresses forward: nothing can be cancelled at any point, which + /// makes the implementation not overly complicated to follow.
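An illustrative sketch (not part of the diff) of driving a room's send queue from the caller's side: send an event, then watch the updates returned by `subscribe()`. It assumes the room exposes its queue through a `send_queue()` accessor, as used elsewhere in this module.

use matrix_sdk::{room::Room, send_queue::RoomSendQueueUpdate};
use matrix_sdk::ruma::events::room::message::RoomMessageEventContent;

async fn send_and_watch(room: &Room) -> anyhow::Result<()> {
    let queue = room.send_queue();
    let (local_echoes, mut updates) = queue.subscribe().await?;
    println!("{} request(s) still waiting to be sent", local_echoes.len());

    queue.send(RoomMessageEventContent::text_plain("hello").into()).await?;

    while let Ok(update) = updates.recv().await {
        match update {
            RoomSendQueueUpdate::SentEvent { transaction_id, event_id } => {
                println!("{transaction_id} sent as {event_id}");
                break;
            }
            RoomSendQueueUpdate::SendError { transaction_id, error, is_recoverable } => {
                eprintln!("{transaction_id} failed (recoverable: {is_recoverable}): {error}");
                break;
            }
            // Other updates (new local echo, cancellation, edit) are ignored here.
            _ => {}
        }
    }
    Ok(())
}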
#[instrument(skip_all, fields(room_id = %room.room_id()))] async fn sending_task( room: WeakRoom, @@ -413,10 +425,10 @@ impl RoomSendQueue { break; } - // Try to apply dependent events now; those applying to previously failed + // Try to apply dependent requests now; those applying to previously failed // attempts (local echoes) would succeed now. - if let Err(err) = queue.apply_dependent_events().await { - warn!("errors when applying dependent events: {err}"); + if let Err(err) = queue.apply_dependent_requests().await { + warn!("errors when applying dependent requests: {err}"); } if !locally_enabled.load(Ordering::SeqCst) { @@ -426,8 +438,8 @@ impl RoomSendQueue { continue; } - let queued_event = match queue.peek_next_to_send().await { - Ok(Some(event)) => event, + let queued_request = match queue.peek_next_to_send().await { + Ok(Some(request)) => request, Ok(None) => { trace!("queue is empty, sleeping"); @@ -437,12 +449,12 @@ impl RoomSendQueue { } Err(err) => { - warn!("error when loading next event to send: {err}"); + warn!("error when loading next request to send: {err}"); continue; } }; - trace!(txn_id = %queued_event.transaction_id, "received an event to send!"); + trace!(txn_id = %queued_request.transaction_id, "received a request to send!"); let Some(room) = room.get() else { if is_dropping.load(Ordering::SeqCst) { @@ -452,29 +464,24 @@ impl RoomSendQueue { continue; }; - let (event, event_type) = queued_event.event.raw(); - match room - .send_raw(&event_type.to_string(), event) - .with_transaction_id(&queued_event.transaction_id) - .with_request_config(RequestConfig::short_retry()) - .await - { - Ok(res) => { - trace!(txn_id = %queued_event.transaction_id, event_id = %res.event_id, "successfully sent"); - - match queue.mark_as_sent(&queued_event.transaction_id, &res.event_id).await { - Ok(()) => { + match Self::handle_request(&room, &queued_request).await { + Ok(parent_key) => match queue + .mark_as_sent(&queued_request.transaction_id, parent_key.clone()) + .await + { + Ok(()) => match parent_key { + SentRequestKey::Event(event_id) => { let _ = updates.send(RoomSendQueueUpdate::SentEvent { - transaction_id: queued_event.transaction_id, - event_id: res.event_id, + transaction_id: queued_request.transaction_id, + event_id, }); } + }, - Err(err) => { - warn!("unable to mark queued event as sent: {err}"); - } + Err(err) => { + warn!("unable to mark queued request as sent: {err}"); } - } + }, Err(err) => { let is_recoverable = match err { @@ -497,35 +504,34 @@ impl RoomSendQueue { }; if is_recoverable { - warn!(txn_id = %queued_event.transaction_id, error = ?err, "Recoverable error when sending event: {err}, disabling send queue"); + warn!(txn_id = %queued_request.transaction_id, error = ?err, "Recoverable error when sending request: {err}, disabling send queue"); - // In this case, we intentionally keep the event in the queue, but mark it + // In this case, we intentionally keep the request in the queue, but mark it // as not being sent anymore. - queue.mark_as_not_being_sent(&queued_event.transaction_id).await; + queue.mark_as_not_being_sent(&queued_request.transaction_id).await; // Let observers know about a failure *after* we've marked the item as not // being sent anymore. Otherwise, there's a possible race where a caller - // might try to remove an item, while it's still - // marked as being sent, resulting in a cancellation - // failure. + // might try to remove an item, while it's still marked as being sent, + // resulting in a cancellation failure. 
// Disable the queue for this room after a recoverable error happened. This // should be the sign that this error is temporary (maybe network // disconnected, maybe the server had a hiccup). locally_enabled.store(false, Ordering::SeqCst); } else { - warn!(txn_id = %queued_event.transaction_id, error = ?err, "Unrecoverable error when sending event: {err}"); + warn!(txn_id = %queued_request.transaction_id, error = ?err, "Unrecoverable error when sending request: {err}"); - // Mark the event as wedged, so it's not picked at any future point. + // Mark the request as wedged, so it's not picked at any future point. if let Err(storage_error) = queue .mark_as_wedged( - &queued_event.transaction_id, + &queued_request.transaction_id, QueueWedgeError::from(&err), ) .await { - warn!("unable to mark event as wedged: {storage_error}"); + warn!("unable to mark request as wedged: {storage_error}"); } } @@ -538,7 +544,7 @@ impl RoomSendQueue { }); let _ = updates.send(RoomSendQueueUpdate::SendError { - transaction_id: queued_event.transaction_id, + transaction_id: queued_request.transaction_id, error, is_recoverable, }); @@ -549,6 +555,27 @@ impl RoomSendQueue { info!("exited sending task"); } + /// Handles a single request and returns the [`SentRequestKey`] on success. + async fn handle_request( + room: &Room, + request: &QueuedRequest, + ) -> Result<SentRequestKey, crate::Error> { + match &request.kind { + QueuedRequestKind::Event { content } => { + let (event, event_type) = content.raw(); + + let res = room + .send_raw(event_type, event) + .with_transaction_id(&request.transaction_id) + .with_request_config(RequestConfig::short_retry()) + .await?; + + trace!(txn_id = %request.transaction_id, event_id = %res.event_id, "event successfully sent"); + Ok(SentRequestKey::Event(res.event_id)) + } + } + } + /// Returns whether the room is enabled, at the room level. pub fn is_enabled(&self) -> bool { self.inner.locally_enabled.load(Ordering::SeqCst) @@ -574,7 +601,7 @@ impl RoomSendQueue { .await .map_err(RoomSendQueueError::StorageError)?; - // Wake up the queue, in case the room was asleep before unwedging the event. + // Wake up the queue, in case the room was asleep before unwedging the request. self.inner.notifier.notify_one(); let _ = self @@ -595,14 +622,17 @@ impl From<&crate::Error> for QueueWedgeError { SessionRecipientCollectionError::VerifiedUserHasUnsignedDevice(user_map) => { QueueWedgeError::InsecureDevices { user_device_map: user_map.clone() } } + SessionRecipientCollectionError::VerifiedUserChangedIdentity(users) => { QueueWedgeError::IdentityViolations { users: users.clone() } } + SessionRecipientCollectionError::CrossSigningNotSetup | SessionRecipientCollectionError::SendingFromUnverifiedDevice => { QueueWedgeError::CrossVerificationRequired } }, + _ => QueueWedgeError::GenericApiError { msg: value.to_string() }, } } @@ -612,23 +642,24 @@ struct RoomSendQueueInner { /// The room which this send queue relates to. room: WeakRoom, - /// Broadcaster for notifications about the statuses of events to be sent. + /// Broadcaster for notifications about the statuses of requests to be sent. /// /// Can be subscribed to from the outside. updates: broadcast::Sender<RoomSendQueueUpdate>, - /// Queue of events that are either to be sent, or being sent. + /// Queue of requests that are either to be sent, or being sent. /// - /// When an event has been sent to the server, it is removed from that queue - /// *after* being sent. That way, we will retry sending upon failure, in - /// the same order events have been inserted in the first place.
+ /// When a request has been sent to the server, it is removed from that + /// queue *after* being sent. That way, we will retry sending upon + /// failure, in the same order requests have been inserted in the first + /// place. queue: QueueStorage, /// A notifier that's updated any time common data is touched (stopped or /// enabled statuses), or the associated room [`QueueStorage`]. notifier: Arc<Notify>, - /// Should the room process new events or not (because e.g. it might be + /// Should the room process new requests or not (because e.g. it might be /// running off the network)? locally_enabled: Arc<AtomicBool>, @@ -645,14 +676,14 @@ struct QueueStorage { /// To which room is this storage related. room_id: OwnedRoomId, - /// All the queued events that are being sent at the moment. + /// All the queued requests that are being sent at the moment. /// /// It also serves as an internal lock on the storage backend. being_sent: Arc<RwLock<BTreeSet<OwnedTransactionId>>>, } impl QueueStorage { - /// Create a new synchronized queue for queuing events to be sent later. + /// Create a new queue for queuing requests to be sent later. fn new(client: WeakClient, room: OwnedRoomId) -> Self { Self { room_id: room, being_sent: Default::default(), client } } @@ -667,45 +698,46 @@ impl QueueStorage { /// Returns the transaction id chosen to identify the request. async fn push( &self, - serializable: SerializableEventContent, + request: QueuedRequestKind, ) -> Result<OwnedTransactionId, RoomSendQueueStorageError> { let transaction_id = TransactionId::new(); self.client()? .store() - .save_send_queue_event(&self.room_id, transaction_id.clone(), serializable) + .save_send_queue_request(&self.room_id, transaction_id.clone(), request) .await?; Ok(transaction_id) } - /// Peeks the next event to be sent, marking it as being sent. + /// Peeks the next request to be sent, marking it as being sent. /// /// It is required to call [`Self::mark_as_sent`] after it's been /// effectively sent. - async fn peek_next_to_send(&self) -> Result<Option<QueuedEvent>, RoomSendQueueStorageError> { + async fn peek_next_to_send(&self) -> Result<Option<QueuedRequest>, RoomSendQueueStorageError> { // Keep the lock until we're done touching the storage. let mut being_sent = self.being_sent.write().await; - let queued_events = self.client()?.store().load_send_queue_events(&self.room_id).await?; + let queued_requests = + self.client()?.store().load_send_queue_requests(&self.room_id).await?; - if let Some(event) = queued_events.iter().find(|queued| !queued.is_wedged()) { - being_sent.insert(event.transaction_id.clone()); + if let Some(request) = queued_requests.iter().find(|queued| !queued.is_wedged()) { + being_sent.insert(request.transaction_id.clone()); - Ok(Some(event.clone())) + Ok(Some(request.clone())) } else { Ok(None) } } - /// Marks an event popped with [`Self::peek_next_to_send`] and identified + /// Marks a request popped with [`Self::peek_next_to_send`] and identified /// with the given transaction id as not being sent anymore, so it can /// be removed from the queue later. async fn mark_as_not_being_sent(&self, transaction_id: &TransactionId) { self.being_sent.write().await.remove(transaction_id); } - /// Marks an event popped with [`Self::peek_next_to_send`] and identified + /// Marks a request popped with [`Self::peek_next_to_send`] and identified /// with the given transaction id as being wedged (and not being sent /// anymore), so it can be removed from the queue later. async fn mark_as_wedged( @@ -720,11 +752,11 @@ impl QueueStorage { Ok(self .client()?
.store() - .update_send_queue_event_status(&self.room_id, transaction_id, Some(reason)) + .update_send_queue_request_status(&self.room_id, transaction_id, Some(reason)) .await?) } - /// Marks an event identified with the given transaction id as being now + /// Marks a request identified with the given transaction id as being now /// unwedged and adds it back to the queue. async fn mark_as_unwedged( &self, @@ -733,16 +765,16 @@ impl QueueStorage { Ok(self .client()? .store() - .update_send_queue_event_status(&self.room_id, transaction_id, None) + .update_send_queue_request_status(&self.room_id, transaction_id, None) .await?) } - /// Marks an event pushed with [`Self::push`] and identified with the given - /// transaction id as sent by removing it from the local queue. + /// Marks a request pushed with [`Self::push`] and identified with the given + /// transaction id as sent, by removing it from the local queue. async fn mark_as_sent( &self, transaction_id: &TransactionId, - event_id: &EventId, + parent_key: SentRequestKey, ) -> Result<(), RoomSendQueueStorageError> { // Keep the lock until we're done touching the storage. let mut being_sent = self.being_sent.write().await; @@ -751,15 +783,13 @@ impl QueueStorage { let client = self.client()?; let store = client.store(); - // Update all dependent events. - store - .update_dependent_send_queue_event(&self.room_id, transaction_id, event_id.to_owned()) - .await?; + // Update all dependent requests. + store.update_dependent_queued_request(&self.room_id, transaction_id, parent_key).await?; - let removed = store.remove_send_queue_event(&self.room_id, transaction_id).await?; + let removed = store.remove_send_queue_request(&self.room_id, transaction_id).await?; if !removed { - warn!(txn_id = %transaction_id, "event marked as sent was missing from storage"); + warn!(txn_id = %transaction_id, "request marked as sent was missing from storage"); } Ok(()) @@ -770,8 +800,8 @@ impl QueueStorage { /// /// Returns whether the given transaction has been effectively removed. If /// false, this either means that the transaction id was unrelated to - /// this queue, or that the event was sent before we cancelled it. - async fn cancel( + /// this queue, or that the request was sent before we cancelled it. + async fn cancel_event( &self, transaction_id: &TransactionId, ) -> Result { @@ -782,11 +812,11 @@ impl QueueStorage { // Save the intent to redact the event. self.client()? .store() - .save_dependent_send_queue_event( + .save_dependent_queued_request( &self.room_id, transaction_id, ChildTransactionId::new(), - DependentQueuedEventKind::Redact, + DependentQueuedRequestKind::RedactEvent, ) .await?; @@ -794,19 +824,18 @@ impl QueueStorage { } let removed = - self.client()?.store().remove_send_queue_event(&self.room_id, transaction_id).await?; + self.client()?.store().remove_send_queue_request(&self.room_id, transaction_id).await?; Ok(removed) } - /// Replace an event that has been sent with - /// [`Self::push`] with the given transaction id, before it's been actually - /// sent. + /// Replace an event that has been sent with [`Self::push`] with the given + /// transaction id, before it's been actually sent. /// /// Returns whether the given transaction has been effectively edited. If /// false, this either means that the transaction id was unrelated to - /// this queue, or that the event was sent before we edited it. - async fn replace( + /// this queue, or that the request was sent before we edited it. 
+ async fn replace_event( &self, transaction_id: &TransactionId, serializable: SerializableEventContent, @@ -815,14 +844,14 @@ impl QueueStorage { let being_sent = self.being_sent.read().await; if being_sent.contains(transaction_id) { - // Save the intent to redact the event. + // Save the intent to edit the associated event. self.client()? .store() - .save_dependent_send_queue_event( + .save_dependent_queued_request( &self.room_id, transaction_id, ChildTransactionId::new(), - DependentQueuedEventKind::Edit { new_content: serializable }, + DependentQueuedRequestKind::EditEvent { new_content: serializable }, ) .await?; @@ -832,12 +861,13 @@ impl QueueStorage { let edited = self .client()? .store() - .update_send_queue_event(&self.room_id, transaction_id, serializable) + .update_send_queue_request(&self.room_id, transaction_id, serializable.into()) .await?; Ok(edited) } + /// Reacts to the given local echo of an event. #[instrument(skip(self))] async fn react( &self, @@ -847,28 +877,28 @@ impl QueueStorage { let client = self.client()?; let store = client.store(); - let queued_events = store.load_send_queue_events(&self.room_id).await?; + let requests = store.load_send_queue_requests(&self.room_id).await?; - // If the event has been already sent, abort immediately. - if !queued_events.iter().any(|item| item.transaction_id == transaction_id) { + // If the target event has been already sent, abort immediately. + if !requests.iter().any(|item| item.transaction_id == transaction_id) { return Ok(None); } - // Record the dependent event. + // Record the dependent request. let reaction_txn_id = ChildTransactionId::new(); store - .save_dependent_send_queue_event( + .save_dependent_queued_request( &self.room_id, transaction_id, reaction_txn_id.clone(), - DependentQueuedEventKind::React { key }, + DependentQueuedRequestKind::ReactEvent { key }, ) .await?; Ok(Some(reaction_txn_id)) } - /// Returns a list of the local echoes, that is, all the events that we're + /// Returns a list of the local echoes, that is, all the requests that we're /// about to send but that haven't been sent yet (or are being sent). async fn local_echoes( &self, @@ -877,61 +907,74 @@ impl QueueStorage { let client = self.client()?; let store = client.store(); - let local_events = - store.load_send_queue_events(&self.room_id).await?.into_iter().map(|queued| { + let local_requests = + store.load_send_queue_requests(&self.room_id).await?.into_iter().map(|queued| { LocalEcho { transaction_id: queued.transaction_id.clone(), - content: LocalEchoContent::Event { - serialized_event: queued.event, - send_handle: SendHandle { - room: room.clone(), - transaction_id: queued.transaction_id, + content: match queued.kind { + QueuedRequestKind::Event { content } => LocalEchoContent::Event { + serialized_event: content, + send_handle: SendHandle { + room: room.clone(), + transaction_id: queued.transaction_id, + }, + send_error: queued.error, }, - send_error: queued.error, }, } }); - let local_reactions = store - .list_dependent_send_queue_events(&self.room_id) - .await? - .into_iter() - .filter_map(|dep| match dep.kind { - DependentQueuedEventKind::Edit { .. 
} | DependentQueuedEventKind::Redact => None, - DependentQueuedEventKind::React { key } => Some(LocalEcho { - transaction_id: dep.own_transaction_id.clone().into(), - content: LocalEchoContent::React { - key, - send_handle: SendReactionHandle { - room: room.clone(), - transaction_id: dep.own_transaction_id, + let local_reactions = + store.load_dependent_queued_requests(&self.room_id).await?.into_iter().filter_map( + |dep| match dep.kind { + DependentQueuedRequestKind::EditEvent { .. } + | DependentQueuedRequestKind::RedactEvent => { + // TODO: reflect local edits/redacts too? + None + } + DependentQueuedRequestKind::ReactEvent { key } => Some(LocalEcho { + transaction_id: dep.own_transaction_id.clone().into(), + content: LocalEchoContent::React { + key, + send_handle: SendReactionHandle { + room: room.clone(), + transaction_id: dep.own_transaction_id, + }, + applies_to: dep.parent_transaction_id, }, - applies_to: dep.parent_transaction_id, - }, - }), - }); + }), + }, + ); - Ok(local_events.chain(local_reactions).collect()) + Ok(local_requests.chain(local_reactions).collect()) } - /// Try to apply a single dependent event, whether it's local or remote. + /// Try to apply a single dependent request, whether it's local or remote. /// /// This swallows errors that would retrigger every time if we retried - /// applying the dependent event: invalid edit content, etc. + /// applying the dependent request: invalid edit content, etc. /// - /// Returns true if the dependent event has been sent (or should not be + /// Returns true if the dependent request has been sent (or should not be /// retried later). #[instrument(skip_all)] - async fn try_apply_single_dependent_event( + async fn try_apply_single_dependent_request( &self, client: &Client, - de: DependentQueuedEvent, + dependent_request: DependentQueuedRequest, ) -> Result { let store = client.store(); - match de.kind { - DependentQueuedEventKind::Edit { new_content } => { - if let Some(event_id) = de.event_id { + let parent_key = dependent_request.parent_key; + + match dependent_request.kind { + DependentQueuedRequestKind::EditEvent { new_content } => { + if let Some(parent_key) = parent_key { + let Some(event_id) = parent_key.into_event_id() else { + return Err(RoomSendQueueError::StorageError( + RoomSendQueueStorageError::InvalidParentKey, + )); + }; + // The parent event has been sent, so send an edit event. let room = client .get_room(&self.room_id) @@ -981,21 +1024,20 @@ impl QueueStorage { ); store - .save_send_queue_event( + .save_send_queue_request( &self.room_id, - de.own_transaction_id.into(), - serializable, + dependent_request.own_transaction_id.into(), + serializable.into(), ) .await .map_err(RoomSendQueueStorageError::StorageError)?; } else { - // The parent event is still local (sending must have failed); update the local - // echo. + // The parent event is still local; update the local echo. 
let edited = store - .update_send_queue_event( + .update_send_queue_request( &self.room_id, - &de.parent_transaction_id, - new_content, + &dependent_request.parent_transaction_id, + new_content.into(), ) .await .map_err(RoomSendQueueStorageError::StorageError)?; @@ -1006,8 +1048,14 @@ impl QueueStorage { } } - DependentQueuedEventKind::Redact => { - if let Some(event_id) = de.event_id { + DependentQueuedRequestKind::RedactEvent => { + if let Some(parent_key) = parent_key { + let Some(event_id) = parent_key.into_event_id() else { + return Err(RoomSendQueueError::StorageError( + RoomSendQueueStorageError::InvalidParentKey, + )); + }; + // The parent event has been sent; send a redaction. let room = client .get_room(&self.room_id) @@ -1020,8 +1068,9 @@ impl QueueStorage { // Note: no reason is provided because we materialize the intent of "cancel // sending the parent event". - if let Err(err) = - room.redact(&event_id, None, Some(de.own_transaction_id.into())).await + if let Err(err) = room + .redact(&event_id, None, Some(dependent_request.own_transaction_id.into())) + .await { warn!("error when sending a redact for {event_id}: {err}"); return Ok(false); @@ -1030,7 +1079,10 @@ impl QueueStorage { // The parent event is still local (sending must have failed); redact the local // echo. let removed = store - .remove_send_queue_event(&self.room_id, &de.parent_transaction_id) + .remove_send_queue_request( + &self.room_id, + &dependent_request.parent_transaction_id, + ) .await .map_err(RoomSendQueueStorageError::StorageError)?; @@ -1040,11 +1092,17 @@ impl QueueStorage { } } - DependentQueuedEventKind::React { key } => { - if let Some(event_id) = de.event_id { + DependentQueuedRequestKind::ReactEvent { key } => { + if let Some(parent_key) = parent_key { + let Some(parent_event_id) = parent_key.into_event_id() else { + return Err(RoomSendQueueError::StorageError( + RoomSendQueueStorageError::InvalidParentKey, + )); + }; + // Queue the reaction event in the send queue 🧠. let react_event = - ReactionEventContent::new(Annotation::new(event_id, key)).into(); + ReactionEventContent::new(Annotation::new(parent_event_id, key)).into(); let serializable = SerializableEventContent::from_raw( Raw::new(&react_event) .map_err(RoomSendQueueStorageError::JsonSerialization)?, @@ -1052,10 +1110,10 @@ impl QueueStorage { ); store - .save_send_queue_event( + .save_send_queue_request( &self.room_id, - de.own_transaction_id.into(), - serializable, + dependent_request.own_transaction_id.into(), + serializable.into(), ) .await .map_err(RoomSendQueueStorageError::StorageError)?; @@ -1070,78 +1128,78 @@ impl QueueStorage { } #[instrument(skip(self))] - async fn apply_dependent_events(&self) -> Result<(), RoomSendQueueError> { + async fn apply_dependent_requests(&self) -> Result<(), RoomSendQueueError> { // Keep the lock until we're done touching the storage. let _being_sent = self.being_sent.read().await; let client = self.client()?; let store = client.store(); - let dependent_events = store - .list_dependent_send_queue_events(&self.room_id) + let dependent_requests = store + .load_dependent_queued_requests(&self.room_id) .await .map_err(RoomSendQueueStorageError::StorageError)?; - let num_initial_dependent_events = dependent_events.len(); - if num_initial_dependent_events == 0 { + let num_initial_dependent_requests = dependent_requests.len(); + if num_initial_dependent_requests == 0 { // Returning early here avoids a bit of useless logging. 
return Ok(()); } - let canonicalized_dependent_events = canonicalize_dependent_events(&dependent_events); + let canonicalized_dependent_requests = canonicalize_dependent_requests(&dependent_requests); // Get rid of the all non-canonical dependent events. - for original in &dependent_events { - if !canonicalized_dependent_events + for original in &dependent_requests { + if !canonicalized_dependent_requests .iter() .any(|canonical| canonical.own_transaction_id == original.own_transaction_id) { store - .remove_dependent_send_queue_event(&self.room_id, &original.own_transaction_id) + .remove_dependent_queued_request(&self.room_id, &original.own_transaction_id) .await .map_err(RoomSendQueueStorageError::StorageError)?; } } - let mut num_dependent_events = canonicalized_dependent_events.len(); + let mut num_dependent_requests = canonicalized_dependent_requests.len(); debug!( - num_dependent_events, - num_initial_dependent_events, "starting handling of dependent events" + num_dependent_requests, + num_initial_dependent_requests, "starting handling of dependent requests" ); - for dependent in canonicalized_dependent_events { + for dependent in canonicalized_dependent_requests { let dependent_id = dependent.own_transaction_id.clone(); - match self.try_apply_single_dependent_event(&client, dependent).await { + match self.try_apply_single_dependent_request(&client, dependent).await { Ok(should_remove) => { if should_remove { - // The dependent event has been successfully applied, forget about it. + // The dependent request has been successfully applied, forget about it. store - .remove_dependent_send_queue_event(&self.room_id, &dependent_id) + .remove_dependent_queued_request(&self.room_id, &dependent_id) .await .map_err(RoomSendQueueStorageError::StorageError)?; - num_dependent_events -= 1; + num_dependent_requests -= 1; } } Err(err) => { - warn!("error when applying single dependent event: {err}"); + warn!("error when applying single dependent request: {err}"); } } } debug!( - leftover_dependent_events = num_dependent_events, - "stopped handling dependent events" + leftover_dependent_requests = num_dependent_requests, + "stopped handling dependent request" ); Ok(()) } - /// Remove a single dependent event from storage. - async fn remove_dependent_send_queue_event( + /// Remove a single dependent request from storage. + async fn remove_dependent_send_queue_request( &self, dependent_event_id: &ChildTransactionId, ) -> Result { @@ -1151,7 +1209,7 @@ impl QueueStorage { Ok(self .client()? .store() - .remove_dependent_send_queue_event(&self.room_id, dependent_event_id) + .remove_dependent_queued_request(&self.room_id, dependent_event_id) .await?) } } @@ -1182,10 +1240,11 @@ pub enum LocalEchoContent { }, } -/// An event that has been locally queued for sending, but hasn't been sent yet. +/// A local representation for a request that hasn't been sent yet to the user's +/// homeserver. #[derive(Clone, Debug)] pub struct LocalEcho { - /// Transaction id used to identify this event. + /// Transaction id used to identify the associated request. pub transaction_id: OwnedTransactionId, /// The content for the local echo. pub content: LocalEchoContent, @@ -1257,8 +1316,9 @@ pub enum RoomSendQueueError { #[error("the room isn't in the joined state")] RoomNotJoined, - /// The room is missing from the client. This could happen if the client is - /// shutting down. + /// The room is missing from the client. + /// + /// This happens only whenever the client is shutting down. 
#[error("the room is now missing from the client")] RoomDisappeared, @@ -1278,12 +1338,18 @@ pub enum RoomSendQueueStorageError { #[error(transparent)] JsonSerialization(#[from] serde_json::Error), + /// A parent key was expected to be of a certain type, and it was another + /// type instead. + #[error("a dependent event had an invalid parent key type")] + InvalidParentKey, + /// The client is shutting down. #[error("The client is shutting down.")] ClientShuttingDown, } /// A handle to manipulate an event that was scheduled to be sent to a room. +// TODO (bnjbvr): consider renaming `SendEventHandle`, unless we can reuse it for medias too. #[derive(Clone, Debug)] pub struct SendHandle { room: RoomSendQueue, @@ -1299,7 +1365,7 @@ impl SendHandle { pub async fn abort(&self) -> Result { trace!("received an abort request"); - if self.room.inner.queue.cancel(&self.transaction_id).await? { + if self.room.inner.queue.cancel_event(&self.transaction_id).await? { trace!("successful abort"); // Propagate a cancelled update too. @@ -1328,7 +1394,7 @@ impl SendHandle { let serializable = SerializableEventContent::from_raw(new_content, event_type); - if self.room.inner.queue.replace(&self.transaction_id, serializable.clone()).await? { + if self.room.inner.queue.replace_event(&self.transaction_id, serializable.clone()).await? { trace!("successful edit"); // Wake up the queue, in case the room was asleep before the edit. @@ -1421,7 +1487,7 @@ impl SendReactionHandle { /// Will return true if the reaction could be aborted, false if it's been /// sent (and there's no matching local echo anymore). pub async fn abort(&self) -> Result { - if self.room.inner.queue.remove_dependent_send_queue_event(&self.transaction_id).await? { + if self.room.inner.queue.remove_dependent_send_queue_request(&self.transaction_id).await? { // Simple case: the reaction was found in the dependent event list. // Propagate a cancelled update too. @@ -1448,27 +1514,29 @@ impl SendReactionHandle { } } -/// From a given source of [`DependentQueuedEvent`], return only the most +/// From a given source of [`DependentQueuedRequest`], return only the most /// meaningful, i.e. the ones that wouldn't be overridden after applying the /// others. -fn canonicalize_dependent_events(dependent: &[DependentQueuedEvent]) -> Vec { - let mut by_event_id = HashMap::>::new(); +fn canonicalize_dependent_requests( + dependent: &[DependentQueuedRequest], +) -> Vec { + let mut by_event_id = HashMap::>::new(); for d in dependent { let prevs = by_event_id.entry(d.parent_transaction_id.clone()).or_default(); - if prevs.iter().any(|prev| matches!(prev.kind, DependentQueuedEventKind::Redact)) { - // The event has already been flagged for redaction, don't consider the other - // dependent events. + if prevs.iter().any(|prev| matches!(prev.kind, DependentQueuedRequestKind::RedactEvent)) { + // The parent event has already been flagged for redaction, don't consider the + // other dependent events. continue; } match &d.kind { - DependentQueuedEventKind::Edit { .. } => { + DependentQueuedRequestKind::EditEvent { .. } => { // Replace any previous edit with this one. if let Some(prev_edit) = prevs .iter_mut() - .find(|prev| matches!(prev.kind, DependentQueuedEventKind::Edit { .. })) + .find(|prev| matches!(prev.kind, DependentQueuedRequestKind::EditEvent { .. })) { *prev_edit = d; } else { @@ -1476,11 +1544,11 @@ fn canonicalize_dependent_events(dependent: &[DependentQueuedEvent]) -> Vec { + DependentQueuedRequestKind::ReactEvent { .. 
} => { prevs.push(d); } - DependentQueuedEventKind::Redact => { + DependentQueuedRequestKind::RedactEvent => { // Remove every other dependent action. prevs.clear(); prevs.push(d); @@ -1500,7 +1568,7 @@ mod tests { use assert_matches2::{assert_let, assert_matches}; use matrix_sdk_base::store::{ - ChildTransactionId, DependentQueuedEvent, DependentQueuedEventKind, + ChildTransactionId, DependentQueuedRequest, DependentQueuedRequestKind, SerializableEventContent, }; use matrix_sdk_test::{async_test, JoinedRoomBuilder, SyncResponseBuilder}; @@ -1509,7 +1577,7 @@ mod tests { room_id, TransactionId, }; - use super::canonicalize_dependent_events; + use super::canonicalize_dependent_requests; use crate::{client::WeakClient, test_utils::logged_in_client}; #[async_test] @@ -1562,23 +1630,23 @@ mod tests { // Smoke test: canonicalizing a single dependent event returns it. let txn = TransactionId::new(); - let edit = DependentQueuedEvent { + let edit = DependentQueuedRequest { own_transaction_id: ChildTransactionId::new(), parent_transaction_id: txn.clone(), - kind: DependentQueuedEventKind::Edit { + kind: DependentQueuedRequestKind::EditEvent { new_content: SerializableEventContent::new( &RoomMessageEventContent::text_plain("edit").into(), ) .unwrap(), }, - event_id: None, + parent_key: None, }; - let res = canonicalize_dependent_events(&[edit]); + let res = canonicalize_dependent_requests(&[edit]); assert_eq!(res.len(), 1); - assert_matches!(&res[0].kind, DependentQueuedEventKind::Edit { .. }); + assert_matches!(&res[0].kind, DependentQueuedRequestKind::EditEvent { .. }); assert_eq!(res[0].parent_transaction_id, txn); - assert!(res[0].event_id.is_none()); + assert!(res[0].parent_key.is_none()); } #[test] @@ -1587,23 +1655,23 @@ mod tests { let txn = TransactionId::new(); let mut inputs = Vec::with_capacity(100); - let redact = DependentQueuedEvent { + let redact = DependentQueuedRequest { own_transaction_id: ChildTransactionId::new(), parent_transaction_id: txn.clone(), - kind: DependentQueuedEventKind::Redact, - event_id: None, + kind: DependentQueuedRequestKind::RedactEvent, + parent_key: None, }; - let edit = DependentQueuedEvent { + let edit = DependentQueuedRequest { own_transaction_id: ChildTransactionId::new(), parent_transaction_id: txn.clone(), - kind: DependentQueuedEventKind::Edit { + kind: DependentQueuedRequestKind::EditEvent { new_content: SerializableEventContent::new( &RoomMessageEventContent::text_plain("edit").into(), ) .unwrap(), }, - event_id: None, + parent_key: None, }; inputs.push({ @@ -1620,10 +1688,10 @@ mod tests { inputs.push(edit); } - let res = canonicalize_dependent_events(&inputs); + let res = canonicalize_dependent_requests(&inputs); assert_eq!(res.len(), 1); - assert_matches!(&res[0].kind, DependentQueuedEventKind::Redact); + assert_matches!(&res[0].kind, DependentQueuedRequestKind::RedactEvent); assert_eq!(res[0].parent_transaction_id, txn); } @@ -1633,25 +1701,25 @@ mod tests { // The latest edit of a list is always preferred. 
let inputs = (0..10) - .map(|i| DependentQueuedEvent { + .map(|i| DependentQueuedRequest { own_transaction_id: ChildTransactionId::new(), parent_transaction_id: parent_txn.clone(), - kind: DependentQueuedEventKind::Edit { + kind: DependentQueuedRequestKind::EditEvent { new_content: SerializableEventContent::new( &RoomMessageEventContent::text_plain(format!("edit{i}")).into(), ) .unwrap(), }, - event_id: None, + parent_key: None, }) .collect::>(); let txn = inputs[9].parent_transaction_id.clone(); - let res = canonicalize_dependent_events(&inputs); + let res = canonicalize_dependent_requests(&inputs); assert_eq!(res.len(), 1); - assert_let!(DependentQueuedEventKind::Edit { new_content } = &res[0].kind); + assert_let!(DependentQueuedRequestKind::EditEvent { new_content } = &res[0].kind); assert_let!( AnyMessageLikeEventContent::RoomMessage(msg) = new_content.deserialize().unwrap() ); @@ -1669,27 +1737,27 @@ mod tests { let inputs = vec![ // This one pertains to txn1. - DependentQueuedEvent { + DependentQueuedRequest { own_transaction_id: child1.clone(), - kind: DependentQueuedEventKind::Redact, + kind: DependentQueuedRequestKind::RedactEvent, parent_transaction_id: txn1.clone(), - event_id: None, + parent_key: None, }, // This one pertains to txn2. - DependentQueuedEvent { + DependentQueuedRequest { own_transaction_id: child2, - kind: DependentQueuedEventKind::Edit { + kind: DependentQueuedRequestKind::EditEvent { new_content: SerializableEventContent::new( &RoomMessageEventContent::text_plain("edit").into(), ) .unwrap(), }, parent_transaction_id: txn2.clone(), - event_id: None, + parent_key: None, }, ]; - let res = canonicalize_dependent_events(&inputs); + let res = canonicalize_dependent_requests(&inputs); // The canonicalization shouldn't depend per event id. assert_eq!(res.len(), 2); @@ -1697,10 +1765,10 @@ mod tests { for dependent in res { if dependent.own_transaction_id == child1 { assert_eq!(dependent.parent_transaction_id, txn1); - assert_matches!(dependent.kind, DependentQueuedEventKind::Redact); + assert_matches!(dependent.kind, DependentQueuedRequestKind::RedactEvent); } else { assert_eq!(dependent.parent_transaction_id, txn2); - assert_matches!(dependent.kind, DependentQueuedEventKind::Edit { .. }); + assert_matches!(dependent.kind, DependentQueuedRequestKind::EditEvent { .. 
}); } } } @@ -1711,27 +1779,27 @@ mod tests { let txn = TransactionId::new(); let react_id = ChildTransactionId::new(); - let react = DependentQueuedEvent { + let react = DependentQueuedRequest { own_transaction_id: react_id.clone(), - kind: DependentQueuedEventKind::React { key: "🧠".to_owned() }, + kind: DependentQueuedRequestKind::ReactEvent { key: "🧠".to_owned() }, parent_transaction_id: txn.clone(), - event_id: None, + parent_key: None, }; let edit_id = ChildTransactionId::new(); - let edit = DependentQueuedEvent { + let edit = DependentQueuedRequest { own_transaction_id: edit_id.clone(), - kind: DependentQueuedEventKind::Edit { + kind: DependentQueuedRequestKind::EditEvent { new_content: SerializableEventContent::new( &RoomMessageEventContent::text_plain("edit").into(), ) .unwrap(), }, parent_transaction_id: txn, - event_id: None, + parent_key: None, }; - let res = canonicalize_dependent_events(&[react, edit]); + let res = canonicalize_dependent_requests(&[react, edit]); assert_eq!(res.len(), 2); assert_eq!(res[0].own_transaction_id, edit_id); diff --git a/crates/matrix-sdk/src/sliding_sync/mod.rs b/crates/matrix-sdk/src/sliding_sync/mod.rs index 43258f4f634..322a7563f00 100644 --- a/crates/matrix-sdk/src/sliding_sync/mod.rs +++ b/crates/matrix-sdk/src/sliding_sync/mod.rs @@ -1106,7 +1106,7 @@ mod tests { use matrix_sdk_test::async_test; use ruma::{ api::client::error::ErrorKind, assign, owned_room_id, room_id, serde::Raw, uint, - DeviceKeyAlgorithm, OwnedRoomId, TransactionId, + OwnedRoomId, TransactionId, }; use serde::Deserialize; use serde_json::json; @@ -2689,6 +2689,8 @@ mod tests { #[async_test] #[cfg(feature = "e2e-encryption")] async fn test_process_only_encryption_events() -> Result<()> { + use ruma::OneTimeKeyAlgorithm; + let room = owned_room_id!("!croissant:example.org"); let server = MockServer::start().await; @@ -2705,7 +2707,7 @@ mod tests { extensions: assign!(http::response::Extensions::default(), { e2ee: assign!(http::response::E2EE::default(), { - device_one_time_keys_count: BTreeMap::from([(DeviceKeyAlgorithm::SignedCurve25519, uint!(42))]) + device_one_time_keys_count: BTreeMap::from([(OneTimeKeyAlgorithm::SignedCurve25519, uint!(42))]) }), to_device: Some(assign!(http::response::ToDevice::default(), { next_batch: "to-device-token".to_owned(), diff --git a/crates/matrix-sdk/tests/integration/client.rs b/crates/matrix-sdk/tests/integration/client.rs index 6e044c28567..589087296a5 100644 --- a/crates/matrix-sdk/tests/integration/client.rs +++ b/crates/matrix-sdk/tests/integration/client.rs @@ -1388,5 +1388,5 @@ async fn test_restore_room() { let room = client.get_room(room_id).unwrap(); assert!(room.is_favourite()); - assert!(!room.pinned_event_ids().is_empty()); + assert!(!room.pinned_event_ids().unwrap().is_empty()); } diff --git a/crates/matrix-sdk/tests/integration/encryption/verification.rs b/crates/matrix-sdk/tests/integration/encryption/verification.rs index ab7eb336fec..69bc38514f6 100644 --- a/crates/matrix-sdk/tests/integration/encryption/verification.rs +++ b/crates/matrix-sdk/tests/integration/encryption/verification.rs @@ -20,7 +20,7 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys}, owned_device_id, owned_user_id, serde::Raw, - user_id, DeviceId, DeviceKeyId, OwnedDeviceId, OwnedUserId, + user_id, CrossSigningKeyId, DeviceId, OwnedDeviceId, OwnedUserId, }; use serde_json::json; use wiremock::{ @@ -239,8 +239,10 @@ fn mock_keys_signature_upload(keys: Arc>) -> impl Fn(&Request) -> Re if let Some(existing_master_key) = keys.master.get_mut(&user) { 
let mut existing = existing_master_key.deserialize().unwrap(); - let target = - DeviceKeyId::from_parts(ruma::DeviceKeyAlgorithm::Ed25519, key_id.into()); + let target = CrossSigningKeyId::from_parts( + ruma::SigningKeyAlgorithm::Ed25519, + key_id.try_into().unwrap(), + ); if existing.keys.contains_key(&target) { let param: CrossSigningKey = serde_json::from_str(raw_key.get()).unwrap(); diff --git a/crates/matrix-sdk/tests/integration/send_queue.rs b/crates/matrix-sdk/tests/integration/send_queue.rs index c88ea5c6934..55455dd0ff9 100644 --- a/crates/matrix-sdk/tests/integration/send_queue.rs +++ b/crates/matrix-sdk/tests/integration/send_queue.rs @@ -1799,7 +1799,7 @@ async fn test_reloading_rooms_with_unsent_events() { .unwrap(); set_client_session(&client).await; - client.send_queue().respawn_tasks_for_rooms_with_unsent_events().await; + client.send_queue().respawn_tasks_for_rooms_with_unsent_requests().await; // Let the sending queues process events. sleep(Duration::from_secs(1)).await; diff --git a/examples/oidc_cli/Cargo.toml b/examples/oidc_cli/Cargo.toml index a5c8617dd02..80d45d4c32a 100644 --- a/examples/oidc_cli/Cargo.toml +++ b/examples/oidc_cli/Cargo.toml @@ -13,7 +13,7 @@ test = false anyhow = { workspace = true } axum = "0.7.4" dirs = "5.0.1" -futures-util = { workspace = true, default-features = false } +futures-util = { workspace = true } matrix-sdk-ui = { path = "../../crates/matrix-sdk-ui" } rand = { workspace = true } serde = { workspace = true } diff --git a/labs/multiverse/src/main.rs b/labs/multiverse/src/main.rs index 810766fa7cb..d88ffa92aba 100644 --- a/labs/multiverse/src/main.rs +++ b/labs/multiverse/src/main.rs @@ -182,7 +182,6 @@ impl App { Default::default(); let timelines = Arc::new(Mutex::new(HashMap::new())); - let c = client.clone(); let r = rooms.clone(); let ri = room_infos.clone(); let ur = ui_rooms.clone(); @@ -192,14 +191,12 @@ impl App { let all_rooms = room_list_service.all_rooms().await?; let listen_task = spawn(async move { - let client = c; let rooms = r; let room_infos = ri; let ui_rooms = ur; let timelines = t; - let (stream, entries_controller) = all_rooms - .entries_with_dynamic_adapters(50_000, client.room_info_notable_update_receiver()); + let (stream, entries_controller) = all_rooms.entries_with_dynamic_adapters(50_000); entries_controller.set_filter(Box::new(new_filter_non_left())); pin_mut!(stream); diff --git a/testing/matrix-sdk-integration-testing/src/tests/room_directory_search.rs b/testing/matrix-sdk-integration-testing/src/tests/room_directory_search.rs index 40b358a9810..b6c20a665d9 100644 --- a/testing/matrix-sdk-integration-testing/src/tests/room_directory_search.rs +++ b/testing/matrix-sdk-integration-testing/src/tests/room_directory_search.rs @@ -46,7 +46,7 @@ async fn test_room_directory_search_filter() -> Result<()> { let mut room_directory_search = RoomDirectorySearch::new(alice); let (values, mut stream) = room_directory_search.results(); assert!(values.is_empty()); - room_directory_search.search(Some(search_string), 10).await?; + room_directory_search.search(Some(search_string), 10, None).await?; let results_batch: Vec> = stream.next().await.unwrap(); assert_eq!(results_batch.len(), 1); @@ -63,7 +63,7 @@ async fn test_room_directory_search_filter() -> Result<()> { assert_pending!(stream); // This should reset the state completely - room_directory_search.search(None, 25).await?; + room_directory_search.search(None, 25, None).await?; let results_batch = stream.next().await.unwrap(); assert_matches!(&results_batch[0], 
VectorDiff::Clear); assert_matches!(&results_batch[1], VectorDiff::Append { values } => { assert_eq!(values.len(), 25); }); diff --git a/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs b/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs index 9dd0958f5ec..e8b41d9c528 100644 --- a/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs +++ b/testing/matrix-sdk-integration-testing/src/tests/sliding_sync/room.rs @@ -771,8 +771,7 @@ async fn test_delayed_decryption_latest_event() -> Result<()> { // Get the room list of Alice. let alice_all_rooms = alice_sync_service.room_list_service().all_rooms().await.unwrap(); - let (alice_room_list_stream, entries) = alice_all_rooms - .entries_with_dynamic_adapters(10, alice.room_info_notable_update_receiver()); + let (alice_room_list_stream, entries) = alice_all_rooms.entries_with_dynamic_adapters(10); entries.set_filter(Box::new(new_filter_all(vec![]))); pin_mut!(alice_room_list_stream); @@ -935,8 +934,7 @@ async fn test_room_info_notable_update_deduplication() -> Result<()> { alice_room.enable_encryption().await.unwrap(); let alice_room_list = alice_sync_service.room_list_service().all_rooms().await.unwrap(); - let (alice_rooms, alice_room_controller) = alice_room_list - .entries_with_dynamic_adapters(10, alice.room_info_notable_update_receiver()); + let (alice_rooms, alice_room_controller) = alice_room_list.entries_with_dynamic_adapters(10); alice_room_controller.set_filter(Box::new(new_filter_all(vec![])));