From 14edbf36d5633bfc9af1bc97e924d2de6610d137 Mon Sep 17 00:00:00 2001
From: Sean Klein
Date: Fri, 18 Oct 2024 16:12:39 -0700
Subject: [PATCH] [nexus] Explicitly terminate pools for qorb (#6881)

Fixes https://github.com/oxidecomputer/omicron/issues/6505 and integrates the new qorb APIs to terminate pools cleanly: https://github.com/oxidecomputer/qorb/pull/45

# Background

- [https://github.com/oxidecomputer/qorb](https://github.com/oxidecomputer/qorb) is a connection pooling crate, which spins up tokio tasks that attempt to connect to backends.
- When `qorb` was integrated into Omicron in https://github.com/oxidecomputer/omicron/pull/5876, I used it to connect to our database backend (CockroachDB). This included usage in tests, even with a "single backend host" (one test-only CRDB server) -- I wanted to ensure that we used the same pool and connector logic in tests and prod.

# What Went Wrong

As identified in #6505, we saw some tests failing during **termination**. The specific cause of the failure was a panic from [async-bb8-diesel](https://github.com/oxidecomputer/async-bb8-diesel), where we attempted to spawn tasks on a terminating tokio executor. This issue stems from async-bb8-diesel's usage of `tokio::task::spawn_blocking`, where the returned `JoinHandle` is immediately awaited and **unwrapped**, with an expectation that "we should always be able to spawn a blocking task". There was a separate discussion about whether or not async-bb8-diesel should be unwrapping in this case (see: https://github.com/oxidecomputer/async-bb8-diesel/pull/77), but instead, I chose to focus on the question:

## Why are we trying to send requests to async-bb8-diesel while the tokio runtime is exiting?

The answer lies in qorb's internals -- qorb itself spawns many tokio tasks to handle ongoing work, including monitoring DNS resolution, checking on backend health, and making connections before they are requested. One of these qorb tasks, which calls `ping_async` to check connection health, used the `async-bb8-diesel` interface that ultimately panicked. Within qorb, most of these tokio tasks have a drop implementation of the form:

```rust
struct MyQorbStructure {
    handle: tokio::task::JoinHandle<()>,
}

impl Drop for MyQorbStructure {
    fn drop(&mut self) {
        self.handle.abort();
    }
}
```

Tragically, without async drop support in Rust, this is the best we can do implicitly -- signal that the background tasks should stop ASAP -- but that may not be soon enough! Calling `.abort()` on the `JoinHandle` does not terminate the task immediately; it simply signals that it should shut down at the next yield point. As a result, we can still see the flake observed in #6505:

- A qorb pool is initialized with background tasks.
- One of the qorb worker tasks is about to make a call to check on the health of a connection to a backend.
- The test finishes and returns; the tokio runtime begins terminating.
- We call `drop` on the `qorb` pool, which signals that the background tasks should abort.
- The aforementioned qorb worker task makes the call to `ping_async`, which calls `spawn_blocking`. This fails, because the tokio runtime is terminating, and returns a [JoinError::Cancelled](https://buildomat.eng.oxide.computer/wg/0/details/01J9YQVN7X5EQNXFSEY6XJBH8B/zfviqPz9RoJp3bY4TafbyqXTwbhqdr7w4oupqBtVARR00CXF/01J9YQWAXY36WM0R2VG27QMFRK#S6049).
- `async-bb8-diesel` unwraps this `JoinError`, and the test panics.

# How do we mitigate this?

That's where this PR comes in. Using the new qorb APIs, we no longer rely on the synchronous drop methods -- we explicitly call `.terminate().await` functions, which do the following:

- Use `tokio::sync::oneshot`s as signals to `tokio::task`s that they should exit
- `.await` the `JoinHandle` for those tasks before returning

Doing this work explicitly as part of cleanup ensures that there are no background tasks attempting to do new work while the tokio runtime is terminating. A sketch of the pattern follows.
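To make the signal-then-await pattern concrete, here is a minimal, self-contained sketch. It is illustrative only: `MyQorbStructure` is carried over from the example above, while the oneshot wiring, the worker loop, and `do_background_work` are hypothetical stand-ins for qorb's internals, not its actual implementation. The real entry point this PR relies on is qorb's new `terminate().await` API.

```rust
use std::time::Duration;
use tokio::sync::oneshot;
use tokio::task::JoinHandle;

struct MyQorbStructure {
    // Used to ask the worker task to exit at its next yield point.
    shutdown_tx: oneshot::Sender<()>,
    handle: JoinHandle<()>,
}

impl MyQorbStructure {
    fn start() -> Self {
        let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
        let handle = tokio::spawn(async move {
            loop {
                tokio::select! {
                    // Exit cleanly once termination has been requested.
                    _ = &mut shutdown_rx => break,
                    // Otherwise, keep doing background work (DNS
                    // monitoring, health checks, connection creation).
                    _ = do_background_work() => {}
                }
            }
        });
        Self { shutdown_tx, handle }
    }

    /// Explicit async cleanup: signal the worker, then wait for it to
    /// actually finish. After this returns, no background task can race
    /// with tokio runtime shutdown -- unlike `JoinHandle::abort()`,
    /// which merely requests cancellation at the next yield point.
    async fn terminate(self) {
        let _ = self.shutdown_tx.send(());
        let _ = self.handle.await;
    }
}

// Hypothetical placeholder for the worker's real job -- e.g., a
// `ping_async` connection health check.
async fn do_background_work() {
    tokio::time::sleep(Duration::from_millis(100)).await;
}
```

In the diff below, this explicit cleanup surfaces as the `datastore.terminate().await` and `db.terminate().await` calls that replace the old `db.cleanup().await.unwrap()` pattern in tests.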
---
 Cargo.lock | 4 +-
 Cargo.toml | 2 +-
 dev-tools/omdb/src/bin/omdb/db.rs | 12 +-
 live-tests/macros/src/lib.rs | 2 +-
 live-tests/tests/common/mod.rs | 8 +-
 nexus-config/src/nexus_config.rs | 17 +-
 nexus/db-queries/src/db/collection_attach.rs | 95 ++++-----
 nexus/db-queries/src/db/collection_detach.rs | 64 +++----
 .../src/db/collection_detach_many.rs | 84 ++++----
 nexus/db-queries/src/db/collection_insert.rs | 24 +--
 .../db-queries/src/db/datastore/allow_list.rs | 13 +-
 nexus/db-queries/src/db/datastore/bgp.rs | 9 +-
 .../src/db/datastore/clickhouse_policy.rs | 27 ++-
 .../src/db/datastore/cockroachdb_node_id.rs | 9 +-
 .../src/db/datastore/cockroachdb_settings.rs | 17 +-
 nexus/db-queries/src/db/datastore/dataset.rs | 10 +-
 .../src/db/datastore/db_metadata.rs | 32 ++--
 .../db-queries/src/db/datastore/deployment.rs | 39 ++--
 .../deployment/external_networking.rs | 15 +-
 nexus/db-queries/src/db/datastore/disk.rs | 9 +-
 nexus/db-queries/src/db/datastore/dns.rs | 39 ++--
 .../src/db/datastore/external_ip.rs | 37 ++--
 nexus/db-queries/src/db/datastore/instance.rs | 57 +++---
 .../db-queries/src/db/datastore/inventory.rs | 27 ++-
 nexus/db-queries/src/db/datastore/ip_pool.rs | 21 +-
 .../src/db/datastore/ipv4_nat_entry.rs | 27 ++-
 .../db-queries/src/db/datastore/migration.rs | 9 +-
 nexus/db-queries/src/db/datastore/mod.rs | 157 ++++++++-------
 .../src/db/datastore/network_interface.rs | 9 +-
 nexus/db-queries/src/db/datastore/oximeter.rs | 45 ++---
 .../src/db/datastore/physical_disk.rs | 45 +++--
 .../src/db/datastore/pub_test_utils.rs | 118 +++++++++++-
 nexus/db-queries/src/db/datastore/rack.rs | 45 +++--
 .../src/db/datastore/region_replacement.rs | 21 +-
 .../datastore/region_snapshot_replacement.rs | 45 +++--
 nexus/db-queries/src/db/datastore/saga.rs | 40 ++--
 nexus/db-queries/src/db/datastore/sled.rs | 49 +++--
 .../src/db/datastore/switch_port.rs | 9 +-
 .../db-queries/src/db/datastore/test_utils.rs | 6 +-
 .../virtual_provisioning_collection.rs | 27 ++-
 nexus/db-queries/src/db/datastore/vmm.rs | 9 +-
 nexus/db-queries/src/db/datastore/volume.rs | 72 ++++---
 .../src/db/datastore/volume_repair.rs | 9 +-
 nexus/db-queries/src/db/datastore/vpc.rs | 33 ++--
 nexus/db-queries/src/db/explain.rs | 16 +-
 nexus/db-queries/src/db/lookup.rs | 16 +-
 nexus/db-queries/src/db/pagination.rs | 37 ++--
 nexus/db-queries/src/db/pool.rs | 49 ++++-
 .../db-queries/src/db/queries/external_ip.rs | 180 ++++++++---------
 .../src/db/queries/network_interface.rs | 181 +++++++++---------
 nexus/db-queries/src/db/queries/next_item.rs | 55 ++----
 nexus/db-queries/src/db/queries/oximeter.rs | 18 +-
 .../src/db/queries/region_allocation.rs | 10 +-
 .../virtual_provisioning_collection_update.rs | 34 ++--
 nexus/db-queries/src/db/queries/vpc_subnet.rs | 41 +---
 nexus/db-queries/src/policy_test/mod.rs | 17 +-
 nexus/db-queries/src/transaction_retry.rs | 15 +-
 nexus/metrics-producer-gc/src/lib.rs | 6 +-
 .../tasks/crdb_node_id_collector.rs | 9 +-
 .../src/app/background/tasks/saga_recovery.rs | 2 +
 nexus/src/app/mod.rs | 57 +++++-
 nexus/src/bin/schema-updater.rs | 9 +-
 nexus/src/context.rs | 19 +-
 nexus/src/lib.rs | 45 +++--
 nexus/src/populate.rs | 2 +
 nexus/test-interface/src/lib.rs | 12 +-
 nexus/test-utils/src/lib.rs | 16 +-
 .../tests/integration_tests/initialization.rs | 25 ++-
 nexus/tests/integration_tests/schema.rs | 7 +-
 workspace-hack/Cargo.toml | 4 +-
 70 files changed, 1215 insertions(+), 1115 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index d669cf0cbf..bc5f53f759 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8720,9 +8720,9 @@ dependencies = [
 [[package]]
 name = "qorb"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "675f442a5904b8b5dc9f5d298be36676b29e2e852eace78a3d3d00822469c88e"
+checksum = "d25f71eb7c5ba56a99f0721fd771b2503aa6de4ec73f0891f9b7ac115ca34723"
 dependencies = [
  "anyhow",
  "async-trait",
diff --git a/Cargo.toml b/Cargo.toml
index 81e09c3a67..e751c36779 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -520,7 +520,7 @@ propolis_api_types = { git = "https://github.com/oxidecomputer/propolis", rev =
 propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" }
 propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" }
 proptest = "1.5.0"
-qorb = "0.1.1"
+qorb = "0.1.2"
 quote = "1.0"
 rand = "0.8.5"
 rand_core = "0.6.4"
diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs
index 29aa0f9376..e268cf200b 100644
--- a/dev-tools/omdb/src/bin/omdb/db.rs
+++ b/dev-tools/omdb/src/bin/omdb/db.rs
@@ -19,7 +19,6 @@ use crate::check_allow_destructive::DestructiveOperationToken;
 use crate::helpers::CONNECTION_OPTIONS_HEADING;
 use crate::helpers::DATABASE_OPTIONS_HEADING;
 use crate::Omdb;
-use anyhow::anyhow;
 use anyhow::bail;
 use anyhow::Context;
 use async_bb8_diesel::AsyncConnection;
@@ -255,10 +254,7 @@ impl DbUrlOptions {
     // doesn't match what we expect. So we use `DataStore::new_unchecked()`
     // here. We will then check the schema version explicitly and warn the
     // user if it doesn't match.
-    let datastore = Arc::new(
-        DataStore::new_unchecked(log.clone(), pool)
-            .map_err(|e| anyhow!(e).context("creating datastore"))?,
-    );
+    let datastore = Arc::new(DataStore::new_unchecked(log.clone(), pool));
     check_schema_version(&datastore).await;
     Ok(datastore)
 }
@@ -785,7 +781,7 @@ impl DbArgs {
     ) -> Result<(), anyhow::Error> {
         let datastore = self.db_url_opts.connect(omdb, log).await?;
         let opctx = OpContext::for_tests(log.clone(), datastore.clone());
-        match &self.command {
+        let res = match &self.command {
             DbCommands::Rack(RackArgs { command: RackCommands::List }) => {
                 cmd_db_rack_list(&opctx, &datastore, &self.fetch_opts).await
             }
@@ -1013,7 +1009,9 @@ impl DbArgs {
             DbCommands::Volumes(VolumeArgs {
                 command: VolumeCommands::List,
             }) => cmd_db_volume_list(&datastore, &self.fetch_opts).await,
-        }
+        };
+        datastore.terminate().await;
+        res
     }
 }
diff --git a/live-tests/macros/src/lib.rs b/live-tests/macros/src/lib.rs
index 4fdd4029b5..cb41abc54b 100644
--- a/live-tests/macros/src/lib.rs
+++ b/live-tests/macros/src/lib.rs
@@ -68,7 +68,7 @@ pub fn live_test(_attrs: TokenStream, input: TokenStream) -> TokenStream {
                 #func_ident_string
             ).await.expect("setting up LiveTestContext");
             #func_ident(&ctx).await;
-            ctx.cleanup_successful();
+            ctx.cleanup_successful().await;
         }
     };
     let mut sig = input_func.sig.clone();
diff --git a/live-tests/tests/common/mod.rs b/live-tests/tests/common/mod.rs
index 360e07235a..3e3e583870 100644
--- a/live-tests/tests/common/mod.rs
+++ b/live-tests/tests/common/mod.rs
@@ -46,10 +46,10 @@ impl LiveTestContext {
     /// Clean up this `LiveTestContext`
     ///
-    /// This mainly removes log files created by the test. We do this in this
-    /// explicit cleanup function rather than on `Drop` because we want the log
-    /// files preserved on test failure.
-    pub fn cleanup_successful(self) {
+    /// This removes log files and cleans up the [`DataStore`], which
+    /// must be terminated asynchronously.
+    pub async fn cleanup_successful(self) {
+        self.datastore.terminate().await;
         self.logctx.cleanup_successful();
     }
diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs
index 8c8c25b4c1..a0af65b6ec 100644
--- a/nexus-config/src/nexus_config.rs
+++ b/nexus-config/src/nexus_config.rs
@@ -270,6 +270,7 @@ pub struct MgdConfig {
 #[derive(Clone, Debug, Deserialize, PartialEq)]
 struct UnvalidatedTunables {
     max_vpc_ipv4_subnet_prefix: u8,
+    load_timeout: Option<std::time::Duration>,
 }
 /// Tunable configuration parameters, intended for use in test environments or
@@ -282,6 +283,11 @@ pub struct Tunables {
     /// Note that this is the maximum _prefix_ size, which sets the minimum size
     /// of the subnet.
     pub max_vpc_ipv4_subnet_prefix: u8,
+
+    /// How long should we attempt to loop until the schema matches?
+    ///
+    /// If "None", nexus loops forever during initialization.
+    pub load_timeout: Option<std::time::Duration>,
 }
 // Convert from the unvalidated tunables, verifying each parameter as needed.
@@ -292,6 +298,7 @@ impl TryFrom<UnvalidatedTunables> for Tunables {
         Tunables::validate_ipv4_prefix(unvalidated.max_vpc_ipv4_subnet_prefix)?;
         Ok(Tunables {
             max_vpc_ipv4_subnet_prefix: unvalidated.max_vpc_ipv4_subnet_prefix,
+            load_timeout: unvalidated.load_timeout,
         })
     }
 }
@@ -341,7 +348,10 @@ pub const MAX_VPC_IPV4_SUBNET_PREFIX: u8 = 26;
 impl Default for Tunables {
     fn default() -> Self {
-        Tunables { max_vpc_ipv4_subnet_prefix: MAX_VPC_IPV4_SUBNET_PREFIX }
+        Tunables {
+            max_vpc_ipv4_subnet_prefix: MAX_VPC_IPV4_SUBNET_PREFIX,
+            load_timeout: None,
+        }
     }
 }
@@ -1003,7 +1013,10 @@ mod test {
                 trusted_root: Utf8PathBuf::from("/path/to/root.json"),
             }),
             schema: None,
-            tunables: Tunables { max_vpc_ipv4_subnet_prefix: 27 },
+            tunables: Tunables {
+                max_vpc_ipv4_subnet_prefix: 27,
+                load_timeout: None
+            },
             dendrite: HashMap::from([(
                 SwitchLocation::Switch0,
                 DpdConfig {
diff --git a/nexus/db-queries/src/db/collection_attach.rs b/nexus/db-queries/src/db/collection_attach.rs
index c009d60483..127861b2a8 100644
--- a/nexus/db-queries/src/db/collection_attach.rs
+++ b/nexus/db-queries/src/db/collection_attach.rs
@@ -577,7 +577,8 @@ where
 #[cfg(test)]
 mod test {
     use super::*;
-    use crate::db::{self, identity::Resource as IdentityResource};
+    use crate::db::datastore::pub_test_utils::TestDatabase;
+    use crate::db::identity::Resource as IdentityResource;
     use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection};
     use chrono::Utc;
     use db_macros::Resource;
     use diesel::expression_methods::ExpressionMethods;
     use diesel::pg::Pg;
     use diesel::QueryDsl;
     use diesel::SelectableHelper;
-    use nexus_test_utils::db::test_setup_database;
     use omicron_common::api::external::{IdentityMetadataCreateParams, Name};
     use omicron_test_utils::dev;
     use uuid::Uuid;
@@ -869,11 +869,9 @@ mod test {
     async fn test_attach_missing_collection_fails() {
         let logctx =
             dev::test_setup_log("test_attach_missing_collection_fails");
-        let mut db = test_setup_database(&logctx.log).await;
-        let cfg = db::Config { url: db.pg_config().clone() };
-        let pool = db::Pool::new_single_host(&logctx.log, &cfg);
-
-        let conn = setup_db(&pool).await;
+        let db = TestDatabase::new_with_pool(&logctx.log).await;
+        let pool = db.pool();
+        let conn = setup_db(pool).await;
         let collection_id = uuid::Uuid::new_v4();
         let resource_id = uuid::Uuid::new_v4();
@@ -891,16 +889,15 @@ mod test {
         assert!(matches!(attach, Err(AttachError::CollectionNotFound)));
-        db.cleanup().await.unwrap();
+        db.terminate().await;
         logctx.cleanup_successful();
     }
     #[tokio::test]
     async fn test_attach_missing_resource_fails() {
         let logctx = dev::test_setup_log("test_attach_missing_resource_fails");
-        let mut db = test_setup_database(&logctx.log).await;
-        let cfg = db::Config { url: db.pg_config().clone() };
-        let pool = db::Pool::new_single_host(&logctx.log, &cfg);
+        let db = TestDatabase::new_with_pool(&logctx.log).await;
+        let pool = db.pool();
         let conn = setup_db(&pool).await;
@@ -928,16 +925,15 @@ mod test {
         // The collection should remain unchanged.
assert_eq!(collection, get_collection(collection_id, &conn).await); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_once() { let logctx = dev::test_setup_log("test_attach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = setup_db(&pool).await; @@ -976,16 +972,15 @@ mod test { ); assert_eq!(returned_resource, get_resource(resource_id, &conn).await); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_once_synchronous() { let logctx = dev::test_setup_log("test_attach_once_synchronous"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = setup_db(&pool).await; @@ -1025,18 +1020,16 @@ mod test { ); assert_eq!(returned_resource, get_resource(resource_id, &conn).await); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_multiple_times() { let logctx = dev::test_setup_log("test_attach_multiple_times"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; const RESOURCE_COUNT: u32 = 5; @@ -1081,18 +1074,16 @@ mod test { ); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_beyond_capacity_fails() { let logctx = dev::test_setup_log("test_attach_beyond_capacity_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1145,18 +1136,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_while_already_attached() { let logctx = dev::test_setup_log("test_attach_while_already_attached"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1252,18 +1241,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_with_filters() { let logctx = dev::test_setup_log("test_attach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, 
&cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -1307,18 +1294,16 @@ mod test { assert_eq!(returned_resource, get_resource(resource_id, &conn).await); assert_eq!(returned_resource.description(), "new description"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_deleted_resource_fails() { let logctx = dev::test_setup_log("test_attach_deleted_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -1352,18 +1337,16 @@ mod test { .await; assert!(matches!(attach, Err(AttachError::ResourceNotFound))); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_attach_without_update_filter() { let logctx = dev::test_setup_log("test_attach_without_update_filter"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1408,7 +1391,7 @@ mod test { .collection_id .is_none()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/collection_detach.rs b/nexus/db-queries/src/db/collection_detach.rs index bc547d5127..cdf8e111c7 100644 --- a/nexus/db-queries/src/db/collection_detach.rs +++ b/nexus/db-queries/src/db/collection_detach.rs @@ -481,7 +481,8 @@ where mod test { use super::*; use crate::db::collection_attach::DatastoreAttachTarget; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::Utc; use db_macros::Resource; @@ -489,7 +490,6 @@ mod test { use diesel::pg::Pg; use diesel::QueryDsl; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_test_utils::dev; use uuid::Uuid; @@ -782,11 +782,9 @@ mod test { async fn test_detach_missing_collection_fails() { let logctx = dev::test_setup_log("test_detach_missing_collection_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -803,18 +801,16 @@ mod test { assert!(matches!(detach, Err(DetachError::CollectionNotFound))); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } 
#[tokio::test] async fn test_detach_missing_resource_fails() { let logctx = dev::test_setup_log("test_detach_missing_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -839,18 +835,16 @@ mod test { // The collection should remain unchanged. assert_eq!(collection, get_collection(collection_id, &conn).await); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_once() { let logctx = dev::test_setup_log("test_detach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -879,18 +873,16 @@ mod test { // The returned value should be the latest value in the DB. assert_eq!(returned_resource, get_resource(resource_id, &conn).await); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_while_already_detached() { let logctx = dev::test_setup_log("test_detach_while_already_detached"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -943,18 +935,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_deleted_resource_fails() { let logctx = dev::test_setup_log("test_detach_deleted_resource_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -987,18 +977,16 @@ mod test { .await; assert!(matches!(detach, Err(DetachError::ResourceNotFound))); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_without_update_filter() { let logctx = dev::test_setup_log("test_detach_without_update_filter"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1044,7 +1032,7 @@ mod test { .collection_id .is_some()); - db.cleanup().await.unwrap(); + db.terminate().await; 
logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/collection_detach_many.rs b/nexus/db-queries/src/db/collection_detach_many.rs index 36755599d4..15267dd7ee 100644 --- a/nexus/db-queries/src/db/collection_detach_many.rs +++ b/nexus/db-queries/src/db/collection_detach_many.rs @@ -479,7 +479,8 @@ where mod test { use super::*; use crate::db::collection_attach::DatastoreAttachTarget; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::Utc; use db_macros::Resource; @@ -487,7 +488,6 @@ mod test { use diesel::pg::Pg; use diesel::QueryDsl; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; use omicron_test_utils::dev; use uuid::Uuid; @@ -774,11 +774,9 @@ mod test { async fn test_detach_missing_collection_fails() { let logctx = dev::test_setup_log("test_detach_missing_collection_fails"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); @@ -796,7 +794,7 @@ mod test { assert!(matches!(detach, Err(DetachManyError::CollectionNotFound))); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -804,11 +802,9 @@ mod test { async fn test_detach_missing_resource_succeeds() { let logctx = dev::test_setup_log("test_detach_missing_resource_succeeds"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let _resource_id = uuid::Uuid::new_v4(); @@ -838,18 +834,16 @@ mod test { get_collection(collection_id, &conn).await ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_once() { let logctx = dev::test_setup_log("test_detach_once"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -881,18 +875,16 @@ mod test { get_collection(collection_id, &conn).await ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_once_synchronous() { let logctx = dev::test_setup_log("test_detach_once_synchronous"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = 
setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -926,18 +918,16 @@ mod test { get_collection(collection_id, &conn).await ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_while_already_detached() { let logctx = dev::test_setup_log("test_detach_while_already_detached"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -982,18 +972,16 @@ mod test { "... and again!" ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_filter_collection() { let logctx = dev::test_setup_log("test_detach_filter_collection"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); @@ -1033,18 +1021,16 @@ mod test { _ => panic!("Unexpected error: {:?}", err), }; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_deleted_resource() { let logctx = dev::test_setup_log("test_detach_deleted_resource"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -1091,18 +1077,16 @@ mod test { &collection_id, ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_detach_many() { let logctx = dev::test_setup_log("test_detach_many"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; // Create the collection and some resources. 
let collection_id1 = uuid::Uuid::new_v4(); @@ -1166,7 +1150,7 @@ mod test { &collection_id2 ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/collection_insert.rs b/nexus/db-queries/src/db/collection_insert.rs index 3aaea6aeb1..7f8275e594 100644 --- a/nexus/db-queries/src/db/collection_insert.rs +++ b/nexus/db-queries/src/db/collection_insert.rs @@ -405,14 +405,14 @@ where #[cfg(test)] mod test { use super::*; - use crate::db::{self, identity::Resource as IdentityResource}; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::identity::Resource as IdentityResource; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use chrono::{DateTime, Utc}; use db_macros::Resource; use diesel::expression_methods::ExpressionMethods; use diesel::pg::Pg; use diesel::QueryDsl; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; table! { @@ -556,11 +556,9 @@ mod test { #[tokio::test] async fn test_collection_not_present() { let logctx = dev::test_setup_log("test_collection_not_present"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -579,18 +577,16 @@ mod test { .await; assert!(matches!(insert, Err(AsyncInsertError::CollectionNotFound))); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_collection_present() { let logctx = dev::test_setup_log("test_collection_present"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - - let conn = setup_db(&pool).await; + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = setup_db(pool).await; let collection_id = uuid::Uuid::new_v4(); let resource_id = uuid::Uuid::new_v4(); @@ -642,7 +638,7 @@ mod test { // Make sure rcgen got incremented assert_eq!(collection_rcgen, 2); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/allow_list.rs b/nexus/db-queries/src/db/datastore/allow_list.rs index 7c1643451f..ce839ebcbc 100644 --- a/nexus/db-queries/src/db/datastore/allow_list.rs +++ b/nexus/db-queries/src/db/datastore/allow_list.rs @@ -83,19 +83,16 @@ impl super::DataStore { #[cfg(test)] mod tests { - use crate::db::{ - datastore::test_utils::datastore_test, - fixed_data::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID, - }; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; + use crate::db::fixed_data::allow_list::USER_FACING_SERVICES_ALLOW_LIST_ID; use omicron_common::api::external; use omicron_test_utils::dev; #[tokio::test] async fn test_allowed_source_ip_database_ops() { let logctx = dev::test_setup_log("test_allowed_source_ip_database_ops"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Should have the default to start with. 
let record = datastore @@ -203,7 +200,7 @@ mod tests { "Updated allowed IPs are incorrect" ); - db.cleanup().await.expect("failed to cleanup database"); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/bgp.rs b/nexus/db-queries/src/db/datastore/bgp.rs index 96231115da..ccf5c6bb75 100644 --- a/nexus/db-queries/src/db/datastore/bgp.rs +++ b/nexus/db-queries/src/db/datastore/bgp.rs @@ -1000,8 +1000,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; use omicron_test_utils::dev; @@ -1011,8 +1010,8 @@ mod tests { let logctx = dev::test_setup_log( "test_delete_bgp_config_and_announce_set_by_name", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let config_name: Name = "testconfig47".parse().unwrap(); let announce_name: Name = "testannounce47".parse().unwrap(); @@ -1069,7 +1068,7 @@ mod tests { .await .expect("delete announce set by name"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs index d433bb9b60..cdd0e4127b 100644 --- a/nexus/db-queries/src/db/datastore/clickhouse_policy.rs +++ b/nexus/db-queries/src/db/datastore/clickhouse_policy.rs @@ -175,9 +175,8 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_inventory::now_db_precision; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::ClickhouseMode; use omicron_test_utils::dev; @@ -185,13 +184,13 @@ mod tests { async fn test_clickhouse_policy_basic() { // Setup let logctx = dev::test_setup_log("test_clickhouse_policy_basic"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Listing an empty table should return an empty vec assert!(datastore - .clickhouse_policy_list(&opctx, &DataPageParams::max_page()) + .clickhouse_policy_list(opctx, &DataPageParams::max_page()) .await .unwrap() .is_empty()); @@ -204,7 +203,7 @@ mod tests { }; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .unwrap_err() .to_string() @@ -213,7 +212,7 @@ mod tests { // Inserting version 2 before version 1 should not work policy.version = 2; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .unwrap_err() .to_string() @@ -222,21 +221,21 @@ mod tests { // Inserting version 1 should work policy.version = 1; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); // Inserting version 2 should work policy.version = 2; assert!(datastore - 
.clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); // Inserting version 4 should not work, since the prior version is 2 policy.version = 4; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .unwrap_err() .to_string() @@ -245,7 +244,7 @@ mod tests { // Inserting version 3 should work policy.version = 3; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); @@ -254,12 +253,12 @@ mod tests { policy.mode = ClickhouseMode::Both { target_servers: 3, target_keepers: 5 }; assert!(datastore - .clickhouse_policy_insert_latest_version(&opctx, &policy) + .clickhouse_policy_insert_latest_version(opctx, &policy) .await .is_ok()); let history = datastore - .clickhouse_policy_list(&opctx, &DataPageParams::max_page()) + .clickhouse_policy_list(opctx, &DataPageParams::max_page()) .await .unwrap(); @@ -278,7 +277,7 @@ mod tests { } // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs b/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs index fee915ab59..1c1a699c26 100644 --- a/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs +++ b/nexus/db-queries/src/db/datastore/cockroachdb_node_id.rs @@ -82,16 +82,15 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; #[tokio::test] async fn test_cockroachdb_node_id() { let logctx = dev::test_setup_log("test_service_network_interfaces_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Make up a CRDB zone id. 
let crdb_zone_id = OmicronZoneUuid::new_v4(); @@ -160,7 +159,7 @@ mod tests { .expect("looked up node ID"); assert_eq!(node_id.as_deref(), Some(fake_node_id)); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs b/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs index a38cfb8935..ba7c302f83 100644 --- a/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs +++ b/nexus/db-queries/src/db/datastore/cockroachdb_settings.rs @@ -133,24 +133,17 @@ impl DataStore { #[cfg(test)] mod test { - use super::{CockroachDbSettings, OpContext}; - use nexus_test_utils::db::test_setup_database; + use super::CockroachDbSettings; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::deployment::CockroachDbClusterVersion; use omicron_common::api::external::Error; use omicron_test_utils::dev; - use std::sync::Arc; #[tokio::test] async fn test_preserve_downgrade() { let logctx = dev::test_setup_log("test_preserve_downgrade"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; - let opctx = OpContext::for_tests( - logctx.log.new(o!()), - Arc::clone(&datastore) as Arc, - ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let settings = datastore.cockroachdb_settings(&opctx).await.unwrap(); let version: CockroachDbClusterVersion = @@ -247,7 +240,7 @@ mod test { } } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/dataset.rs b/nexus/db-queries/src/db/datastore/dataset.rs index 0fe1c7912e..1843df0c7d 100644 --- a/nexus/db-queries/src/db/datastore/dataset.rs +++ b/nexus/db-queries/src/db/datastore/dataset.rs @@ -235,21 +235,19 @@ impl DataStore { #[cfg(test)] mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_db_model::Generation; use nexus_db_model::SledBaseboard; use nexus_db_model::SledSystemHardware; use nexus_db_model::SledUpdate; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::internal::shared::DatasetKind as ApiDatasetKind; use omicron_test_utils::dev; #[tokio::test] async fn test_insert_if_not_exists() { let logctx = dev::test_setup_log("inventory_insert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; - let opctx = &opctx; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // There should be no datasets initially. 
assert_eq!( @@ -383,7 +381,7 @@ mod test { expected_datasets, ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/db_metadata.rs b/nexus/db-queries/src/db/datastore/db_metadata.rs index b997bf384f..fbb6cd35c8 100644 --- a/nexus/db-queries/src/db/datastore/db_metadata.rs +++ b/nexus/db-queries/src/db/datastore/db_metadata.rs @@ -496,31 +496,26 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use camino::Utf8Path; use camino_tempfile::Utf8TempDir; use nexus_db_model::SCHEMA_VERSION; - use nexus_test_utils::db as test_db; use omicron_test_utils::dev; - use std::sync::Arc; // Confirms that calling the internal "ensure_schema" function can succeed // when the database is already at that version. #[tokio::test] async fn ensure_schema_is_current_version() { let logctx = dev::test_setup_log("ensure_schema_is_current_version"); - let mut crdb = test_db::test_setup_database(&logctx.log).await; - - let cfg = db::Config { url: crdb.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); + let db = TestDatabase::new_with_raw_datastore(&logctx.log).await; + let datastore = db.datastore(); datastore .ensure_schema(&logctx.log, SCHEMA_VERSION, None) .await .expect("Failed to ensure schema"); - crdb.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -556,10 +551,8 @@ mod test { let logctx = dev::test_setup_log("concurrent_nexus_instances_only_move_forward"); let log = &logctx.log; - let mut crdb = test_db::test_setup_database(&logctx.log).await; - - let cfg = db::Config { url: crdb.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // Mimic the layout of "schema/crdb". @@ -659,7 +652,7 @@ mod test { .collect::, _>>() .expect("Failed to create datastore"); - crdb.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -668,10 +661,8 @@ mod test { let logctx = dev::test_setup_log("schema_version_subcomponents_save_progress"); let log = &logctx.log; - let mut crdb = test_db::test_setup_database(&logctx.log).await; - - let cfg = db::Config { url: crdb.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // Mimic the layout of "schema/crdb". @@ -753,8 +744,7 @@ mod test { // Manually construct the datastore to avoid the backoff timeout. // We want to trigger errors, but have no need to wait. 
- let datastore = - DataStore::new_unchecked(log.clone(), pool.clone()).unwrap(); + let datastore = DataStore::new_unchecked(log.clone(), pool.clone()); while let Err(e) = datastore .ensure_schema(&log, SCHEMA_VERSION, Some(&all_versions)) .await @@ -780,7 +770,7 @@ mod test { .expect("Failed to get data"); assert_eq!(data, "abcd"); - crdb.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/deployment.rs b/nexus/db-queries/src/db/datastore/deployment.rs index e43a50a291..4398c4b13f 100644 --- a/nexus/db-queries/src/db/datastore/deployment.rs +++ b/nexus/db-queries/src/db/datastore/deployment.rs @@ -1730,7 +1730,7 @@ impl RunQueryDsl for InsertTargetQuery {} mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::raw_query_builder::QueryBuilder; use nexus_inventory::now_db_precision; use nexus_inventory::CollectionBuilder; @@ -1738,7 +1738,6 @@ mod tests { use nexus_reconfigurator_planning::blueprint_builder::Ensure; use nexus_reconfigurator_planning::blueprint_builder::EnsureMultiple; use nexus_reconfigurator_planning::example::example; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; @@ -1906,8 +1905,8 @@ mod tests { async fn test_empty_blueprint() { // Setup let logctx = dev::test_setup_log("test_empty_blueprint"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an empty blueprint from it let blueprint1 = BlueprintBuilder::build_empty_with_sleds( @@ -1956,7 +1955,7 @@ mod tests { // on other tests to check blueprint deletion. // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1965,8 +1964,8 @@ mod tests { const TEST_NAME: &str = "test_representative_blueprint"; // Setup let logctx = dev::test_setup_log(TEST_NAME); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a cohesive representative collection/policy/blueprint let (collection, planning_input, blueprint1) = @@ -2191,7 +2190,7 @@ mod tests { ); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2199,8 +2198,8 @@ mod tests { async fn test_set_target() { // Setup let logctx = dev::test_setup_log("test_set_target"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Trying to insert a target that doesn't reference a blueprint should // fail with a relevant error message. @@ -2377,7 +2376,7 @@ mod tests { ); // Clean up. 
- db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2385,8 +2384,8 @@ mod tests { async fn test_set_target_enabled() { // Setup let logctx = dev::test_setup_log("test_set_target_enabled"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an initial empty collection let collection = CollectionBuilder::new("test").build(); @@ -2490,7 +2489,7 @@ mod tests { } // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2580,8 +2579,8 @@ mod tests { let logctx = dev::test_setup_log( "test_ensure_external_networking_works_with_good_target", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let blueprint = create_blueprint_with_external_ip(&datastore, &opctx).await; @@ -2607,7 +2606,7 @@ mod tests { .expect("Should be able to allocate external network resources"); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2617,8 +2616,8 @@ mod tests { let logctx = dev::test_setup_log( "test_ensure_external_networking_bails_on_bad_target", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an initial empty collection let collection = CollectionBuilder::new("test").build(); @@ -2812,7 +2811,7 @@ mod tests { ); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs index 7ace07305d..0f13050b98 100644 --- a/nexus/db-queries/src/db/datastore/deployment/external_networking.rs +++ b/nexus/db-queries/src/db/datastore/deployment/external_networking.rs @@ -408,7 +408,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use anyhow::Context as _; use async_bb8_diesel::AsyncSimpleConnection; @@ -417,7 +417,6 @@ mod tests { use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_model::SqlU16; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::blueprint_zone_type; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; @@ -876,8 +875,8 @@ mod tests { // Set up. usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_ip_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Generate the test values we care about. let mut harness = Harness::new(); @@ -1128,7 +1127,7 @@ mod tests { } // Clean up. 
- db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1137,8 +1136,8 @@ mod tests { // Set up. usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_ip_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Generate the test values we care about. let harness = Harness::new(); @@ -1195,7 +1194,7 @@ mod tests { harness.assert_nics_are_deleted_in_datastore(&datastore).await; // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 7adc6df3e1..76f4055373 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -843,8 +843,7 @@ impl DataStore { mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::external_api::params; use omicron_common::api::external; use omicron_test_utils::dev; @@ -854,8 +853,8 @@ mod tests { let logctx = dev::test_setup_log("test_undelete_disk_set_faulted_idempotent"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, db_datastore) = (db.opctx(), db.datastore()); let silo_id = opctx.authn.actor().unwrap().silo_id().unwrap(); @@ -979,7 +978,7 @@ mod tests { ); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/dns.rs b/nexus/db-queries/src/db/datastore/dns.rs index f37e8d9e34..9279933e47 100644 --- a/nexus/db-queries/src/db/datastore/dns.rs +++ b/nexus/db-queries/src/db/datastore/dns.rs @@ -729,7 +729,7 @@ impl DataStoreDnsTest for DataStore { #[cfg(test)] mod test { - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::DnsVersionUpdateBuilder; use crate::db::DataStore; use crate::db::TransactionError; @@ -744,7 +744,6 @@ mod test { use nexus_db_model::DnsZone; use nexus_db_model::Generation; use nexus_db_model::InitialDnsGroup; - use nexus_test_utils::db::test_setup_database; use nexus_types::internal_api::params::DnsRecord; use nexus_types::internal_api::params::Srv; use omicron_common::api::external::Error; @@ -758,8 +757,8 @@ mod test { #[tokio::test] async fn test_read_dns_config_uninitialized() { let logctx = dev::test_setup_log("test_read_dns_config_uninitialized"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // If we attempt to load the config when literally nothing related to // DNS has been initialized, we will get an InternalError because we @@ -834,7 +833,7 @@ mod test { version for DNS group External, found 0" ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -842,8 +841,8 @@ mod test { #[tokio::test] async fn test_read_dns_config_basic() { let logctx = 
dev::test_setup_log("test_read_dns_config_basic"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create exactly one zone with no names in it. // This will not show up in the read config. @@ -940,7 +939,7 @@ mod test { .expect("failed to read DNS config with batch size 1"); assert_eq!(dns_config_batch_1, dns_config); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -948,8 +947,8 @@ mod test { #[tokio::test] async fn test_read_dns_config_complex() { let logctx = dev::test_setup_log("test_read_dns_config_complex"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let batch_size = NonZeroU32::new(10).unwrap(); let now = Utc::now(); let log = &logctx.log; @@ -1310,7 +1309,7 @@ mod test { HashMap::from([("n1".to_string(), records_r2.clone())]) ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1318,8 +1317,8 @@ mod test { #[tokio::test] async fn test_dns_uniqueness() { let logctx = dev::test_setup_log("test_dns_uniqueness"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let now = Utc::now(); // There cannot be two DNS zones in the same group with the same name. @@ -1415,7 +1414,7 @@ mod test { .contains("duplicate key value violates unique constraint")); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1491,8 +1490,8 @@ mod test { #[tokio::test] async fn test_dns_update_incremental() { let logctx = dev::test_setup_log("test_dns_update_incremental"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let now = Utc::now(); // Create three DNS zones for testing: @@ -1862,15 +1861,15 @@ mod test { assert_eq!(dns_config.zones[1].zone_name, "oxide2.test"); assert_eq!(dns_config.zones[0].records, dns_config.zones[1].records,); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_dns_update_from_version() { let logctx = dev::test_setup_log("test_dns_update_from_version"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // The guts of `dns_update_from_version()` are shared with // `dns_update_incremental()`. 
The main cases worth testing here are @@ -1975,7 +1974,7 @@ mod test { assert!(!records.contains_key("krabappel")); assert!(records.contains_key("hoover")); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 4b7f4a3825..a03b6a6249 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -87,7 +87,7 @@ impl DataStore { probe_id: Uuid, pool: Option, ) -> CreateResult { - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = IncompleteExternalIp::for_ephemeral_probe( ip_id, probe_id, @@ -123,7 +123,7 @@ impl DataStore { // Naturally, we now *need* to destroy the ephemeral IP if the newly alloc'd // IP was not attached, including on idempotent success. - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = IncompleteExternalIp::for_ephemeral(ip_id, authz_pool.id()); // We might not be able to acquire a new IP, but in the event of an @@ -205,7 +205,7 @@ impl DataStore { // If no pool specified, use the default logic None => { let (authz_pool, ..) = - self.ip_pools_fetch_default(&opctx).await?; + self.ip_pools_fetch_default(opctx).await?; authz_pool } }; @@ -224,7 +224,7 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = if let Some(ip) = ip { IncompleteExternalIp::for_floating_explicit( @@ -695,7 +695,7 @@ impl DataStore { ip_id: Uuid, instance_id: InstanceUuid, ) -> Result, Error> { - let _ = LookupPath::new(&opctx, self) + let _ = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -951,7 +951,7 @@ impl DataStore { instance_id: InstanceUuid, creating_instance: bool, ) -> UpdateResult<(ExternalIp, bool)> { - let (.., authz_instance) = LookupPath::new(&opctx, self) + let (.., authz_instance) = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -993,7 +993,7 @@ impl DataStore { instance_id: InstanceUuid, creating_instance: bool, ) -> UpdateResult<(ExternalIp, bool)> { - let (.., authz_instance) = LookupPath::new(&opctx, self) + let (.., authz_instance) = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -1132,8 +1132,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::external_api::shared::IpRange; @@ -1164,11 +1163,11 @@ mod tests { async fn test_service_ip_list() { usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_ip_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // No 
IPs, to start - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, vec![]); // Set up service IP pool range @@ -1178,11 +1177,11 @@ mod tests { )) .unwrap(); let (service_ip_pool, _) = datastore - .ip_pools_service_lookup(&opctx) + .ip_pools_service_lookup(opctx) .await .expect("lookup service ip pool"); datastore - .ip_pool_add_range(&opctx, &service_ip_pool, &ip_range) + .ip_pool_add_range(opctx, &service_ip_pool, &ip_range) .await .expect("add range to service ip pool"); @@ -1208,7 +1207,7 @@ mod tests { }; let external_ip = datastore .external_ip_allocate_omicron_zone( - &opctx, + opctx, OmicronZoneUuid::new_v4(), ZoneKind::Nexus, external_ip, @@ -1221,7 +1220,7 @@ mod tests { external_ips.sort_by_key(|ip| ip.id); // Ensure we see them all. - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, external_ips); // Deallocate a few, and ensure we don't see them anymore. @@ -1230,7 +1229,7 @@ mod tests { if i % 3 == 0 { let id = external_ip.id; datastore - .deallocate_external_ip(&opctx, id) + .deallocate_external_ip(opctx, id) .await .expect("failed to deallocate IP"); removed_ip_ids.insert(id); @@ -1243,10 +1242,10 @@ mod tests { external_ips.retain(|ip| !removed_ip_ids.contains(&ip.id)); // Ensure we see them all remaining IPs. - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, external_ips); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs index 8fdb16b2f3..0698883891 100644 --- a/nexus/db-queries/src/db/datastore/instance.rs +++ b/nexus/db-queries/src/db/datastore/instance.rs @@ -1943,15 +1943,14 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::sled; - use crate::db::datastore::test_utils::datastore_test; use crate::db::lookup::LookupPath; use crate::db::pagination::Paginator; use nexus_db_model::InstanceState; use nexus_db_model::Project; use nexus_db_model::VmmRuntimeState; use nexus_db_model::VmmState; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::identity::Asset; use nexus_types::silo::DEFAULT_SILO_ID; @@ -2034,8 +2033,8 @@ mod tests { async fn test_instance_updater_acquires_lock() { // Setup let logctx = dev::test_setup_log("test_instance_updater_acquires_lock"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let saga1 = Uuid::new_v4(); let saga2 = Uuid::new_v4(); let (authz_project, _) = create_test_project(&datastore, &opctx).await; @@ -2108,7 +2107,7 @@ mod tests { assert!(unlocked, "instance must actually be unlocked"); // Clean up. 
- db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2117,8 +2116,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_instance_updater_lock_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2172,7 +2171,7 @@ mod tests { assert!(!unlocked, "instance should already have been unlocked"); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2182,8 +2181,8 @@ mod tests { let logctx = dev::test_setup_log( "test_instance_updater_cant_unlock_someone_elses_instance_", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2265,7 +2264,7 @@ mod tests { assert!(!unlocked); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2274,8 +2273,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_unlocking_a_deleted_instance_is_okay"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2324,7 +2323,7 @@ mod tests { .expect("instance should unlock"); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2333,8 +2332,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_instance_commit_update_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2422,7 +2421,7 @@ mod tests { assert_eq!(instance.runtime().r#gen, new_runtime.r#gen); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2432,8 +2431,8 @@ mod tests { let logctx = dev::test_setup_log( "test_instance_update_invalidated_while_locked", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2512,7 +2511,7 @@ mod tests { assert_eq!(instance.runtime().nexus_state, new_runtime.nexus_state); // Clean up. 
- db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2520,8 +2519,8 @@ mod tests { async fn test_instance_fetch_all() { // Setup let logctx = dev::test_setup_log("test_instance_fetch_all"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2692,7 +2691,7 @@ mod tests { ); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2700,8 +2699,8 @@ mod tests { async fn test_instance_set_migration_ids() { // Setup let logctx = dev::test_setup_log("test_instance_set_migration_ids"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let authz_instance = create_test_instance( &datastore, @@ -2959,7 +2958,7 @@ mod tests { assert_eq!(instance.runtime().dst_propolis_id, Some(vmm3.id)); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2969,8 +2968,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_instance_and_vmm_list_by_sled_agent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, _) = create_test_project(&datastore, &opctx).await; let mut expected_instances = BTreeSet::new(); @@ -3096,7 +3095,7 @@ mod tests { assert_eq!(expected_instances, found_instances); // Clean up. 
- db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/inventory.rs b/nexus/db-queries/src/db/datastore/inventory.rs index 3fdf38f19a..4a2ab216a2 100644 --- a/nexus/db-queries/src/db/datastore/inventory.rs +++ b/nexus/db-queries/src/db/datastore/inventory.rs @@ -2444,7 +2444,7 @@ impl DataStoreInventoryTest for DataStore { #[cfg(test)] mod test { use crate::db::datastore::inventory::DataStoreInventoryTest; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::DataStoreConnection; use crate::db::raw_query_builder::{QueryBuilder, TrustedStr}; use crate::db::schema; @@ -2457,7 +2457,6 @@ mod test { use gateway_client::types::SpType; use nexus_inventory::examples::representative; use nexus_inventory::examples::Representative; - use nexus_test_utils::db::test_setup_database; use nexus_test_utils::db::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_types::inventory::BaseboardId; use nexus_types::inventory::CabooseWhich; @@ -2511,8 +2510,8 @@ mod test { #[tokio::test] async fn test_find_hw_baseboard_id_missing_returns_not_found() { let logctx = dev::test_setup_log("inventory_insert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let baseboard_id = BaseboardId { serial_number: "some-serial".into(), part_number: "some-part".into(), @@ -2522,7 +2521,7 @@ mod test { .await .unwrap_err(); assert!(matches!(err, Error::ObjectNotFound { .. })); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2532,8 +2531,8 @@ mod test { async fn test_inventory_insert() { // Setup let logctx = dev::test_setup_log("inventory_insert"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create an empty collection and write it to the database. let builder = nexus_inventory::CollectionBuilder::new("test"); @@ -3015,7 +3014,7 @@ mod test { assert_ne!(coll_counts.rot_pages, 0); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3098,8 +3097,8 @@ mod test { async fn test_inventory_deletion() { // Setup let logctx = dev::test_setup_log("inventory_deletion"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a representative collection and write it to the database. let Representative { builder, .. } = representative(); @@ -3136,7 +3135,7 @@ mod test { ); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3144,8 +3143,8 @@ mod test { async fn test_representative_collection_populates_database() { // Setup let logctx = dev::test_setup_log("inventory_deletion"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a representative collection and write it to the database. 
let Representative { builder, .. } = representative(); @@ -3161,7 +3160,7 @@ mod test { .expect("All inv_... tables should be populated by representative collection"); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 7fa38badb0..9ea8f7b088 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -1121,12 +1121,11 @@ mod test { use std::num::NonZeroU32; use crate::authz; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::{ IpPool, IpPoolResource, IpPoolResourceType, Project, }; use assert_matches::assert_matches; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::address::{IpRange, Ipv4Range, Ipv6Range}; @@ -1139,8 +1138,8 @@ mod test { #[tokio::test] async fn test_default_ip_pools() { let logctx = dev::test_setup_log("test_default_ip_pools"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // we start out with no default pool, so we expect not found let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); @@ -1298,15 +1297,15 @@ mod test { .expect("Should list silo IP pools"); assert_eq!(silo_pools.len(), 0); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_internal_ip_pool() { let logctx = dev::test_setup_log("test_internal_ip_pool"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // confirm internal pool appears as internal let (authz_pool, _pool) = @@ -1352,15 +1351,15 @@ mod test { datastore.ip_pool_is_internal(&opctx, &authz_other_pool).await; assert_eq!(is_internal, Ok(false)); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_ip_pool_utilization() { let logctx = dev::test_setup_log("test_ip_utilization"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_silo = opctx.authn.silo_required().unwrap(); let project = Project::new( @@ -1503,7 +1502,7 @@ mod test { assert_eq!(max_ips.ipv4, 5); assert_eq!(max_ips.ipv6, 1208925819614629174706166); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs index 0a514f55dc..80794f193a 100644 --- a/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs +++ b/nexus/db-queries/src/db/datastore/ipv4_nat_entry.rs @@ -379,10 +379,9 @@ fn ipv4_nat_next_version() -> diesel::expression::SqlLiteral { mod test { use std::{net::Ipv4Addr, str::FromStr}; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use chrono::Utc; use 
nexus_db_model::{Ipv4NatEntry, Ipv4NatValues, MacAddr, Vni}; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external; use omicron_test_utils::dev; use rand::seq::IteratorRandom; @@ -391,8 +390,8 @@ mod test { #[tokio::test] async fn nat_version_tracking() { let logctx = dev::test_setup_log("test_nat_version_tracking"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -538,7 +537,7 @@ mod test { 3 ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -548,8 +547,8 @@ mod test { /// of properties. async fn table_allows_unique_active_multiple_deleted() { let logctx = dev::test_setup_log("test_nat_version_tracking"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -682,7 +681,7 @@ mod test { 4 ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -690,8 +689,8 @@ mod test { #[tokio::test] async fn ipv4_nat_sync_service_zones() { let logctx = dev::test_setup_log("ipv4_nat_sync_service_zones"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -804,7 +803,7 @@ mod test { && entry.version_removed.is_none() })); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -812,8 +811,8 @@ mod test { #[tokio::test] async fn ipv4_nat_changeset() { let logctx = dev::test_setup_log("test_nat_version_tracking"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // We should not have any NAT entries at this moment let initial_state = @@ -953,7 +952,7 @@ mod test { // did we see everything? 
assert_eq!(total_changes, db_records.len()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/migration.rs b/nexus/db-queries/src/db/datastore/migration.rs index 0866226d69..e1d8c070e7 100644 --- a/nexus/db-queries/src/db/datastore/migration.rs +++ b/nexus/db-queries/src/db/datastore/migration.rs @@ -178,11 +178,10 @@ impl DataStore { mod tests { use super::*; use crate::authz; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::lookup::LookupPath; use crate::db::model::Instance; use nexus_db_model::Project; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_common::api::external::ByteCount; @@ -277,8 +276,8 @@ mod tests { async fn test_migration_query_by_instance() { // Setup let logctx = dev::test_setup_log("test_migration_query_by_instance"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_instance = create_test_instance(&datastore, &opctx).await; let instance_id = InstanceUuid::from_untyped_uuid(authz_instance.id()); @@ -356,7 +355,7 @@ mod tests { ); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index e724d3d9af..f943e70c4a 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -27,7 +27,7 @@ use crate::db::{ error::{public_error_from_diesel, ErrorHandler}, }; use ::oximeter::types::ProducerRegistry; -use anyhow::{anyhow, bail, Context}; +use anyhow::{bail, Context}; use async_bb8_diesel::AsyncRunQueryDsl; use diesel::pg::Pg; use diesel::prelude::*; @@ -197,16 +197,15 @@ impl DataStore { /// Ignores the underlying DB version. Should be used with caution, as usage /// of this method can construct a Datastore which does not understand /// the underlying CockroachDB schema. Data corruption could result. - pub fn new_unchecked(log: Logger, pool: Arc<Pool>) -> Result<Self, String> { - let datastore = DataStore { + pub fn new_unchecked(log: Logger, pool: Arc<Pool>) -> Self { + DataStore { log, pool, virtual_provisioning_collection_producer: crate::provisioning::Producer::new(), transaction_retry_producer: crate::transaction_retry::Producer::new( ), - }; - Ok(datastore) + } } /// Constructs a new Datastore object. @@ -217,15 +216,32 @@ impl DataStore { log: &Logger, pool: Arc<Pool>, config: Option<&AllSchemaVersions>, + ) -> Result<Self, String> { + Self::new_with_timeout(log, pool, config, None).await + } + + pub async fn new_with_timeout( + log: &Logger, + pool: Arc<Pool>, + config: Option<&AllSchemaVersions>, + try_for: Option<std::time::Duration>, ) -> Result<Self, String> { let datastore = - Self::new_unchecked(log.new(o!("component" => "datastore")), pool)?; + Self::new_unchecked(log.new(o!("component" => "datastore")), pool); + + let start = std::time::Instant::now(); // Keep looping until we find that the schema matches our expectation.
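+ // If a deadline was supplied via `try_for`, each retry below first checks it, and gives up permanently once the deadline has passed.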
const EXPECTED_VERSION: SemverVersion = nexus_db_model::SCHEMA_VERSION; retry_notify( retry_policy_internal_service(), || async { + if let Some(try_for) = try_for { + if std::time::Instant::now() > start + try_for { + return Err(BackoffError::permanent(())); + } + } + match datastore .ensure_schema(&log, EXPECTED_VERSION, config) .await @@ -252,8 +268,7 @@ impl DataStore { pool: Arc, ) -> Result { let datastore = - Self::new_unchecked(log.new(o!("component" => "datastore")), pool) - .map_err(|e| anyhow!("{}", e))?; + Self::new_unchecked(log.new(o!("component" => "datastore")), pool); const EXPECTED_VERSION: SemverVersion = nexus_db_model::SCHEMA_VERSION; let (found_version, found_target) = datastore .database_schema_version() @@ -277,6 +292,11 @@ impl DataStore { Ok(datastore) } + /// Terminates the underlying pool, stopping it from connecting to backends. + pub async fn terminate(&self) { + self.pool.terminate().await + } + pub fn register_producers(&self, registry: &ProducerRegistry) { registry .register_producer( @@ -426,7 +446,7 @@ mod test { use crate::authn; use crate::authn::SiloAuthnPolicy; use crate::authz; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test_utils::{ IneligibleSledKind, IneligibleSleds, }; @@ -447,7 +467,6 @@ mod test { use nexus_db_fixed_data::silo::DEFAULT_SILO; use nexus_db_model::IpAttachState; use nexus_db_model::{to_db_typed_uuid, Generation}; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_common::api::external::{ @@ -489,8 +508,8 @@ mod test { #[tokio::test] async fn test_project_creation() { let logctx = dev::test_setup_log("test_project_creation"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_silo = opctx.authn.silo_required().unwrap(); @@ -519,15 +538,15 @@ mod test { .unwrap(); assert!(silo_after_project_create.rcgen > silo.rcgen); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_session_methods() { let logctx = dev::test_setup_log("test_session_methods"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authn_opctx = OpContext::for_background( logctx.log.new(o!("component" => "TestExternalAuthn")), Arc::new(authz::Authz::new(&logctx.log)), @@ -654,7 +673,7 @@ mod test { datastore.session_hard_delete(&opctx, &authz_session).await; assert_eq!(delete_again, Ok(())); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -984,8 +1003,8 @@ mod test { /// pool IDs should not matter. 
async fn test_region_allocation_strat_random() { let logctx = dev::test_setup_log("test_region_allocation_strat_random"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_datasets = TestDatasets::create( &opctx, datastore.clone(), @@ -1062,7 +1081,7 @@ mod test { } } - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1074,8 +1093,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_strat_random_with_distinct_sleds", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a rack with enough sleds for a successful allocation when we // require 3 distinct eligible sleds. @@ -1151,7 +1170,7 @@ mod test { } } - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1162,8 +1181,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_strat_random_with_distinct_sleds_fails", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a rack without enough sleds for a successful allocation when // we require 3 distinct provisionable sleds. @@ -1207,7 +1226,7 @@ mod test { assert!(matches!(err, Error::InsufficientCapacity { .. })); } - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1215,8 +1234,8 @@ mod test { async fn test_region_allocation_is_idempotent() { let logctx = dev::test_setup_log("test_region_allocation_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); TestDatasets::create( &opctx, datastore.clone(), @@ -1273,7 +1292,7 @@ mod test { assert_eq!(dataset_and_regions1[i], dataset_and_regions2[i],); } - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1282,8 +1301,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_only_operates_on_zpools_in_inventory", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a sled... let sled_id = create_test_sled(&datastore).await; @@ -1373,7 +1392,7 @@ mod test { .await .expect("Allocation should have worked after adding zpools to inventory"); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1381,8 +1400,8 @@ mod test { async fn test_region_allocation_not_enough_zpools() { let logctx = dev::test_setup_log("test_region_allocation_not_enough_zpools"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a sled... 
let sled_id = create_test_sled(&datastore).await; @@ -1458,7 +1477,7 @@ mod test { assert!(matches!(err, Error::InsufficientCapacity { .. })); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1467,8 +1486,8 @@ mod test { let logctx = dev::test_setup_log( "test_region_allocation_only_considers_disks_in_service", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a sled... let sled_id = create_test_sled(&datastore).await; @@ -1576,7 +1595,7 @@ mod test { } } - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1584,8 +1603,8 @@ mod test { async fn test_region_allocation_out_of_space_fails() { let logctx = dev::test_setup_log("test_region_allocation_out_of_space_fails"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); TestDatasets::create( &opctx, @@ -1610,7 +1629,7 @@ mod test { .await .is_err()); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1621,11 +1640,8 @@ mod test { use omicron_common::api::external; let logctx = dev::test_setup_log("test_queries_do_not_require_full_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); - let datastore = - DataStore::new(&logctx.log, Arc::new(pool), None).await.unwrap(); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); let explanation = DataStore::get_allocated_regions_query(Uuid::nil()) .explain_async(&conn) @@ -1656,7 +1672,7 @@ mod test { explanation, ); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -1666,15 +1682,8 @@ mod test { use std::net::Ipv6Addr; let logctx = dev::test_setup_log("test_sled_ipv6_address_allocation"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); - let opctx = OpContext::for_tests( - logctx.log.new(o!()), - Arc::clone(&datastore) as Arc, - ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id = Uuid::new_v4(); let addr1 = "[fd00:1de::1]:12345".parse().unwrap(); @@ -1714,15 +1723,15 @@ mod test { let expected_ip = Ipv6Addr::new(0xfd00, 0x1df, 0, 0, 0, 0, 1, 0); assert_eq!(ip, expected_ip); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_ssh_keys() { let logctx = dev::test_setup_log("test_ssh_keys"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a new Silo user so that we can lookup their keys. 
let authz_silo = authz::Silo::new( @@ -1798,15 +1807,15 @@ mod test { datastore.ssh_key_delete(&opctx, &authz_ssh_key).await.unwrap(); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_rack_initialize_is_idempotent() { let logctx = dev::test_setup_log("test_rack_initialize_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a Rack, insert it into the DB. let rack = Rack::new(Uuid::new_v4()); @@ -1839,15 +1848,15 @@ mod test { .unwrap(); assert!(result.initialized); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_table_scan() { let logctx = dev::test_setup_log("test_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let error = datastore.test_try_table_scan(&opctx).await; println!("error from attempted table scan: {:#}", error); @@ -1865,7 +1874,7 @@ mod test { } // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1877,8 +1886,8 @@ mod test { let logctx = dev::test_setup_log( "test_deallocate_external_ip_by_instance_id_is_idempotent", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a few records. @@ -1932,7 +1941,7 @@ mod test { .expect("Failed to delete instance external IPs"); assert_eq!(count, 0, "Expected to delete zero IPs for the instance"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1943,8 +1952,8 @@ mod test { let logctx = dev::test_setup_log("test_deallocate_external_ip_is_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let conn = datastore.pool_connection_for_tests().await.unwrap(); // Create a record. 
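For reference, a minimal sketch of the test shape these hunks migrate to, assuming only the `TestDatabase` API added in `pub_test_utils.rs` later in this diff; the test name and body here are hypothetical:

```rust
use crate::db::datastore::pub_test_utils::TestDatabase;
use omicron_test_utils::dev;

#[tokio::test]
async fn test_example() {
    let logctx = dev::test_setup_log("test_example");
    // One test-only CockroachDB instance, plus a datastore and OpContext.
    let db = TestDatabase::new_with_datastore(&logctx.log).await;
    let (opctx, datastore) = (db.opctx(), db.datastore());

    // ... exercise `datastore` with `opctx` here ...

    // Explicit async teardown: `terminate` must run before the test
    // returns; dropping a live `TestDatabase` panics (see its docs).
    db.terminate().await;
    logctx.cleanup_successful();
}
```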
@@ -1998,7 +2007,7 @@ mod test { .await .is_err()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2011,8 +2020,8 @@ mod test { use diesel::result::Error::DatabaseError; let logctx = dev::test_setup_log("test_external_ip_check_constraints"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); let now = Utc::now(); @@ -2246,7 +2255,7 @@ mod test { } } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/network_interface.rs b/nexus/db-queries/src/db/datastore/network_interface.rs index 1b1ff8a75b..b7f0622609 100644 --- a/nexus/db-queries/src/db/datastore/network_interface.rs +++ b/nexus/db-queries/src/db/datastore/network_interface.rs @@ -892,10 +892,9 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_config::NUM_INITIAL_RESERVED_IP_ADDRESSES; use nexus_db_fixed_data::vpc_subnet::NEXUS_VPC_SUBNET; - use nexus_test_utils::db::test_setup_database; use omicron_common::address::NEXUS_OPTE_IPV4_SUBNET; use omicron_test_utils::dev; use std::collections::BTreeSet; @@ -924,8 +923,8 @@ mod tests { usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_service_network_interfaces_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // No IPs, to start let nics = read_all_service_nics(&datastore, &opctx).await; @@ -991,7 +990,7 @@ mod tests { let nics = read_all_service_nics(&datastore, &opctx).await; assert_eq!(nics, service_nics); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/oximeter.rs b/nexus/db-queries/src/db/datastore/oximeter.rs index f43ab4a051..be5ddb91bb 100644 --- a/nexus/db-queries/src/db/datastore/oximeter.rs +++ b/nexus/db-queries/src/db/datastore/oximeter.rs @@ -292,8 +292,7 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; - use db::datastore::pub_test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use nexus_types::internal_api::params; use omicron_common::api::internal::nexus; use omicron_test_utils::dev; @@ -345,9 +344,8 @@ mod tests { async fn test_oximeter_expunge() { // Setup let logctx = dev::test_setup_log("test_oximeter_expunge"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. 
let mut collector_ids = @@ -447,7 +445,7 @@ mod tests { assert_eq!(expunged1a, expunged1b); // Cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -457,9 +455,8 @@ mod tests { let logctx = dev::test_setup_log( "test_producer_endpoint_reassigns_if_oximeter_expunged", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert an Oximeter collector. let oximeter1_id = Uuid::new_v4(); @@ -574,7 +571,7 @@ mod tests { } // Cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -584,9 +581,8 @@ mod tests { let logctx = dev::test_setup_log( "test_producer_endpoint_upsert_rejects_expunged_oximeters", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. let collector_ids = (0..4).map(|_| Uuid::new_v4()).collect::>(); @@ -684,7 +680,7 @@ mod tests { ); // Cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -692,9 +688,8 @@ mod tests { async fn test_oximeter_reassigns_randomly() { // Setup let logctx = dev::test_setup_log("test_oximeter_reassigns_randomly"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. let collector_ids = (0..4).map(|_| Uuid::new_v4()).collect::>(); @@ -788,7 +783,7 @@ mod tests { assert_eq!(producer_counts[1..].iter().sum::(), 1000); // Cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -798,9 +793,8 @@ mod tests { let logctx = dev::test_setup_log( "test_oximeter_reassign_fails_if_no_collectors", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert a few Oximeter collectors. 
let collector_ids = (0..4).map(|_| Uuid::new_v4()).collect::>(); @@ -895,7 +889,7 @@ mod tests { assert_eq!(nproducers, 100); // Cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -903,9 +897,8 @@ mod tests { async fn test_producers_list_expired() { // Setup let logctx = dev::test_setup_log("test_producers_list_expired"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Insert an Oximeter collector let collector_info = OximeterInfo::new(¶ms::OximeterInfo { @@ -972,7 +965,7 @@ mod tests { .await; assert_eq!(expired_producers.as_slice(), &[]); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/physical_disk.rs b/nexus/db-queries/src/db/datastore/physical_disk.rs index 61852e454e..73aa837af8 100644 --- a/nexus/db-queries/src/db/datastore/physical_disk.rs +++ b/nexus/db-queries/src/db/datastore/physical_disk.rs @@ -321,10 +321,10 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; - use crate::db::datastore::test_utils::datastore_test; use crate::db::lookup::LookupPath; use crate::db::model::{PhysicalDiskKind, Sled, SledUpdate}; use dropshot::PaginationOrder; @@ -332,7 +332,6 @@ mod test { use nexus_sled_agent_shared::inventory::{ Baseboard, Inventory, InventoryDisk, OmicronZonesConfig, SledRole, }; - use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external::ByteCount; use omicron_common::disk::{DiskIdentity, DiskVariant}; @@ -372,8 +371,8 @@ mod test { async fn physical_disk_insert_same_uuid_collides() { let logctx = dev::test_setup_log("physical_disk_insert_same_uuid_collides"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; let sled_id = sled.id(); @@ -405,7 +404,7 @@ mod test { "{err}" ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -413,8 +412,8 @@ mod test { async fn physical_disk_insert_different_disks() { let logctx = dev::test_setup_log("physical_disk_insert_different_disks"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; let sled_id = sled.id(); @@ -454,15 +453,15 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 2); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn physical_disk_deletion_idempotency() { let logctx = dev::test_setup_log("physical_disk_deletion_idempotency"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = 
create_test_sled(&datastore).await; @@ -504,7 +503,7 @@ mod test { .await .expect("Failed to delete disk"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -518,8 +517,8 @@ mod test { let logctx = dev::test_setup_log( "physical_disk_insert_delete_reupsert_new_sled", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled_a = create_test_sled(&datastore).await; let sled_b = create_test_sled(&datastore).await; @@ -592,7 +591,7 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 1); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -607,8 +606,8 @@ mod test { async fn physical_disk_insert_reupsert_new_sled() { let logctx = dev::test_setup_log("physical_disk_insert_reupsert_new_sled"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled_a = create_test_sled(&datastore).await; let sled_b = create_test_sled(&datastore).await; @@ -670,7 +669,7 @@ mod test { .expect("Failed to list physical disks"); assert_eq!(disks.len(), 1); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -776,8 +775,8 @@ mod test { async fn physical_disk_cannot_insert_to_expunged_sled() { let logctx = dev::test_setup_log("physical_disk_cannot_insert_to_expunged_sled"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore).await; @@ -814,15 +813,15 @@ mod test { "Expected string: {expected} within actual error: {actual}", ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn physical_disk_uninitialized_list() { let logctx = dev::test_setup_log("physical_disk_uninitialized_list"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled_a = create_test_sled(&datastore).await; let sled_b = create_test_sled(&datastore).await; @@ -1000,7 +999,7 @@ mod test { .expect("Failed to list uninitialized disks"); assert_eq!(uninitialized_disks.len(), 0); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/pub_test_utils.rs b/nexus/db-queries/src/db/datastore/pub_test_utils.rs index bcf6a6c80f..1e3343a165 100644 --- a/nexus/db-queries/src/db/datastore/pub_test_utils.rs +++ b/nexus/db-queries/src/db/datastore/pub_test_utils.rs @@ -12,32 +12,134 @@ use crate::authz; use crate::context::OpContext; use crate::db; use crate::db::DataStore; -use dropshot::test_util::LogContext; use omicron_test_utils::dev::db::CockroachInstance; +use slog::Logger; use std::sync::Arc; use uuid::Uuid; +#[cfg(test)] +mod test { + use super::*; + use nexus_test_utils::db::test_setup_database; + + enum TestKind { + Pool { pool: Arc<db::Pool> }, + RawDatastore { datastore: Arc<DataStore> }, + Datastore { opctx:
OpContext, datastore: Arc<DataStore> }, + } + + /// A test database with a pool connected to it. + pub struct TestDatabase { + db: CockroachInstance, + + kind: TestKind, + } + + impl TestDatabase { + /// Creates a new database for test usage, with a pool. + /// + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. + pub async fn new_with_pool(log: &Logger) -> Self { + let db = test_setup_database(log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(db::Pool::new_single_host(log, &cfg)); + Self { db, kind: TestKind::Pool { pool } } + } + + /// Creates a new database for test usage, with a pre-loaded datastore. + /// + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. + pub async fn new_with_datastore(log: &Logger) -> Self { + let db = test_setup_database(log).await; + let (opctx, datastore) = + crate::db::datastore::test_utils::datastore_test(log, &db) + .await; + + Self { db, kind: TestKind::Datastore { opctx, datastore } } + } + + /// Creates a new database for test usage, with a raw datastore. + /// + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. + pub async fn new_with_raw_datastore(log: &Logger) -> Self { + let db = test_setup_database(log).await; + let cfg = db::Config { url: db.pg_config().clone() }; + let pool = Arc::new(db::Pool::new_single_host(log, &cfg)); + let datastore = + Arc::new(DataStore::new(&log, pool, None).await.unwrap()); + Self { db, kind: TestKind::RawDatastore { datastore } } + } + + pub fn pool(&self) -> &Arc<db::Pool> { + match &self.kind { + TestKind::Pool { pool } => pool, + TestKind::RawDatastore { .. } | TestKind::Datastore { .. } => { + panic!("Wrong test type; try using `TestDatabase::new_with_pool`"); + } + } + } + + pub fn opctx(&self) -> &OpContext { + match &self.kind { + TestKind::Pool { .. } | TestKind::RawDatastore { .. } => { + panic!("Wrong test type; try using `TestDatabase::new_with_datastore`"); + } + TestKind::Datastore { opctx, .. } => opctx, + } + } + + pub fn datastore(&self) -> &Arc<DataStore> { + match &self.kind { + TestKind::Pool { .. } => { + panic!("Wrong test type; try using `TestDatabase::new_with_datastore`"); + } + TestKind::RawDatastore { datastore } => datastore, + TestKind::Datastore { datastore, .. } => datastore, + } + } + + /// Shuts down both the database and the pool. + pub async fn terminate(mut self) { + match self.kind { + TestKind::Pool { pool } => pool.terminate().await, + TestKind::RawDatastore { datastore } => { + datastore.terminate().await + } + TestKind::Datastore { datastore, ..
} => { + datastore.terminate().await + } + } + self.db.cleanup().await.unwrap(); + } + } +} + +#[cfg(test)] +pub use test::TestDatabase; + /// Constructs a DataStore for use in test suites that has preloaded the /// built-in users, roles, and role assignments that are needed for basic /// operation #[cfg(any(test, feature = "testing"))] pub async fn datastore_test( - logctx: &LogContext, + log: &Logger, db: &CockroachInstance, rack_id: Uuid, ) -> (OpContext, Arc) { use crate::authn; let cfg = db::Config { url: db.pg_config().clone() }; - let pool = Arc::new(db::Pool::new_single_host(&logctx.log, &cfg)); - let datastore = - Arc::new(DataStore::new(&logctx.log, pool, None).await.unwrap()); + let pool = Arc::new(db::Pool::new_single_host(&log, &cfg)); + let datastore = Arc::new(DataStore::new(&log, pool, None).await.unwrap()); // Create an OpContext with the credentials of "db-init" just for the // purpose of loading the built-in users, roles, and assignments. let opctx = OpContext::for_background( - logctx.log.new(o!()), - Arc::new(authz::Authz::new(&logctx.log)), + log.new(o!()), + Arc::new(authz::Authz::new(&log)), authn::Context::internal_db_init(), Arc::clone(&datastore) as Arc, ); @@ -60,7 +162,7 @@ pub async fn datastore_test( // Create an OpContext with the credentials of "test-privileged" for general // testing. let opctx = OpContext::for_tests( - logctx.log.new(o!()), + log.new(o!()), Arc::clone(&datastore) as Arc, ); diff --git a/nexus/db-queries/src/db/datastore/rack.rs b/nexus/db-queries/src/db/datastore/rack.rs index 27b64eed2f..ea0ef57908 100644 --- a/nexus/db-queries/src/db/datastore/rack.rs +++ b/nexus/db-queries/src/db/datastore/rack.rs @@ -997,10 +997,10 @@ impl DataStore { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; - use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::Discoverability; use crate::db::model::ExternalIp; use crate::db::model::IpKind; @@ -1015,7 +1015,6 @@ mod test { SledBuilder, SystemDescription, }; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::BlueprintZonesConfig; use nexus_types::deployment::CockroachDbPreserveDowngrade; use nexus_types::deployment::{ @@ -1131,8 +1130,8 @@ mod test { #[tokio::test] async fn rack_set_initialized_empty() { let logctx = dev::test_setup_log("rack_set_initialized_empty"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let before = Utc::now(); let rack_init = RackInit::default(); @@ -1233,7 +1232,7 @@ mod test { .unwrap(); assert_eq!(dns_internal, dns_internal2); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1317,8 +1316,8 @@ mod test { async fn rack_set_initialized_with_services() { let test_name = "rack_set_initialized_with_services"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled1 = create_test_sled(&datastore, Uuid::new_v4()).await; let sled2 = create_test_sled(&datastore, 
Uuid::new_v4()).await; @@ -1661,7 +1660,7 @@ mod test { let observed_datasets = get_all_datasets(&datastore).await; assert!(observed_datasets.is_empty()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1669,8 +1668,8 @@ mod test { async fn rack_set_initialized_with_many_nexus_services() { let test_name = "rack_set_initialized_with_many_nexus_services"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore, Uuid::new_v4()).await; @@ -1946,7 +1945,7 @@ mod test { Some(&external_records) ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1955,8 +1954,8 @@ mod test { let test_name = "rack_set_initialized_missing_service_pool_ip_throws_error"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore, Uuid::new_v4()).await; @@ -2046,7 +2045,7 @@ mod test { assert!(get_all_datasets(&datastore).await.is_empty()); assert!(get_all_external_ips(&datastore).await.is_empty()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2054,8 +2053,8 @@ mod test { async fn rack_set_initialized_overlapping_ips_throws_error() { let test_name = "rack_set_initialized_overlapping_ips_throws_error"; let logctx = dev::test_setup_log(test_name); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sled = create_test_sled(&datastore, Uuid::new_v4()).await; @@ -2195,15 +2194,15 @@ mod test { assert!(get_all_datasets(&datastore).await.is_empty()); assert!(get_all_external_ips(&datastore).await.is_empty()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn rack_sled_subnet_allocations() { let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id = Uuid::new_v4(); @@ -2288,15 +2287,15 @@ mod test { allocations.iter().map(|a| a.subnet_octet).collect::<Vec<_>>() ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn allocate_sled_underlay_subnet_octets() { let logctx = dev::test_setup_log("rack_sled_subnet_allocations"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id = Uuid::new_v4(); @@ -2482,7 +2481,7 @@ mod test { next_expected_octet += 1; } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/region_replacement.rs
b/nexus/db-queries/src/db/datastore/region_replacement.rs index b9b5a0827f..508e80a63b 100644 --- a/nexus/db-queries/src/db/datastore/region_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_replacement.rs @@ -895,15 +895,14 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; #[tokio::test] async fn test_one_replacement_per_volume() { let logctx = dev::test_setup_log("test_one_replacement_per_volume"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let region_1_id = Uuid::new_v4(); let region_2_id = Uuid::new_v4(); @@ -921,7 +920,7 @@ mod test { .await .unwrap_err(); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -934,8 +933,8 @@ mod test { let logctx = dev::test_setup_log( "test_replacement_done_in_middle_of_drive_saga", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let region_id = Uuid::new_v4(); let volume_id = Uuid::new_v4(); @@ -1014,7 +1013,7 @@ mod test { ); assert_eq!(actual_request.operating_saga_id, None); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1026,8 +1025,8 @@ mod test { let logctx = dev::test_setup_log( "test_replacement_done_in_middle_of_finish_saga", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let region_id = Uuid::new_v4(); let volume_id = Uuid::new_v4(); @@ -1081,7 +1080,7 @@ mod test { .await .unwrap(); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs index 25751e3920..d9c8a8b258 100644 --- a/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs +++ b/nexus/db-queries/src/db/datastore/region_snapshot_replacement.rs @@ -1059,16 +1059,15 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::RegionReplacement; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; #[tokio::test] async fn test_one_replacement_per_volume() { let logctx = dev::test_setup_log("test_one_replacement_per_volume"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let dataset_1_id = Uuid::new_v4(); let region_1_id = Uuid::new_v4(); @@ -1106,7 +1105,7 @@ mod test { .await .unwrap_err(); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1115,8 +1114,8 @@ mod test { let logctx = dev::test_setup_log( "test_one_replacement_per_volume_conflict_with_region", ); - let mut db = 
test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let dataset_1_id = Uuid::new_v4(); let region_1_id = Uuid::new_v4(); @@ -1146,15 +1145,15 @@ mod test { .await .unwrap_err(); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn count_replacement_steps() { let logctx = dev::test_setup_log("count_replacement_steps"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let dataset_id = Uuid::new_v4(); let region_id = Uuid::new_v4(); @@ -1300,7 +1299,7 @@ mod test { 1, ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1309,8 +1308,8 @@ mod test { let logctx = dev::test_setup_log( "unique_region_snapshot_replacement_step_per_volume", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Ensure that only one non-complete replacement step can be inserted // per volume. @@ -1401,15 +1400,15 @@ mod test { .await .unwrap(); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn region_snapshot_replacement_step_gc() { let logctx = dev::test_setup_log("region_snapshot_replacement_step_gc"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let mut request = RegionSnapshotReplacement::new( Uuid::new_v4(), @@ -1470,7 +1469,7 @@ mod test { .len(), ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1478,8 +1477,8 @@ mod test { async fn region_snapshot_replacement_step_conflict() { let logctx = dev::test_setup_log("region_snapshot_replacement_step_conflict"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Assert that a region snapshot replacement step cannot be created for // a volume that is the "old snapshot volume" for another snapshot @@ -1520,7 +1519,7 @@ mod test { } ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1530,8 +1529,8 @@ mod test { let logctx = dev::test_setup_log( "region_snapshot_replacement_step_conflict_with_region_replacement", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Assert that a region snapshot replacement step cannot be performed on // a volume if region replacement is occurring for that volume. 
@@ -1553,7 +1552,7 @@ mod test { .await .unwrap_err(); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/saga.rs b/nexus/db-queries/src/db/datastore/saga.rs index 0b626804e1..f1f0bd18cc 100644 --- a/nexus/db-queries/src/db/datastore/saga.rs +++ b/nexus/db-queries/src/db/datastore/saga.rs @@ -259,12 +259,11 @@ impl DataStore { #[cfg(test)] mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use async_bb8_diesel::AsyncConnection; use async_bb8_diesel::AsyncSimpleConnection; use db::queries::ALLOW_FULL_TABLE_SCAN_SQL; use nexus_db_model::{SagaNodeEvent, SecId}; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::Generation; use omicron_test_utils::dev; use rand::seq::SliceRandom; @@ -276,8 +275,8 @@ mod test { async fn test_list_candidate_sagas() { // Test setup let logctx = dev::test_setup_log("test_list_candidate_sagas"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let sec_id = db::SecId(uuid::Uuid::new_v4()); let mut inserted_sagas = (0..SQL_BATCH_SIZE.get() * 2) .map(|_| SagaTestContext::new(sec_id).new_running_db_saga()) @@ -324,7 +323,7 @@ mod test { ); // Test cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -333,8 +332,8 @@ mod test { async fn test_list_unfinished_nodes() { // Test setup let logctx = dev::test_setup_log("test_list_unfinished_nodes"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let node_cx = SagaTestContext::new(SecId(Uuid::new_v4())); // Create a couple batches of saga events @@ -399,7 +398,7 @@ mod test { } // Test cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -408,8 +407,8 @@ mod test { async fn test_list_no_unfinished_nodes() { // Test setup let logctx = dev::test_setup_log("test_list_no_unfinished_nodes"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let saga_id = steno::SagaId(Uuid::new_v4()); // Test that this returns "no nodes" rather than throwing some "not @@ -424,7 +423,7 @@ mod test { assert_eq!(observed_nodes.len(), 0); // Test cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -432,8 +431,8 @@ mod test { async fn test_create_event_idempotent() { // Test setup let logctx = dev::test_setup_log("test_create_event_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let node_cx = SagaTestContext::new(SecId(Uuid::new_v4())); // Generate a bunch of events. 
@@ -466,7 +465,7 @@ mod test { } // Test cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -474,8 +473,8 @@ mod test { async fn test_update_state_idempotent() { // Test setup let logctx = dev::test_setup_log("test_create_event_idempotent"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let node_cx = SagaTestContext::new(SecId(Uuid::new_v4())); // Create a saga in the running state. @@ -518,7 +517,7 @@ mod test { .expect("updating state to Done again"); // Test cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -563,9 +562,8 @@ mod test { async fn test_saga_reassignment() { // Test setup let logctx = dev::test_setup_log("test_saga_reassignment"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = datastore_test(&logctx, &db).await; - let opctx = OpContext::for_tests(logctx.log.clone(), datastore.clone()); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Populate the database with a few different sagas: // @@ -707,7 +705,7 @@ mod test { assert_eq!(nreassigned, 0); // Test cleanup - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/sled.rs b/nexus/db-queries/src/db/datastore/sled.rs index 9e9d8d1828..8e37d7ae7f 100644 --- a/nexus/db-queries/src/db/datastore/sled.rs +++ b/nexus/db-queries/src/db/datastore/sled.rs @@ -824,12 +824,12 @@ impl TransitionError { #[cfg(test)] pub(in crate::db::datastore) mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::{ sled_baseboard_for_test, sled_system_hardware_for_test, }; use crate::db::datastore::test_utils::{ - datastore_test, sled_set_policy, sled_set_state, Expected, - IneligibleSleds, + sled_set_policy, sled_set_state, Expected, IneligibleSleds, }; use crate::db::lookup::LookupPath; use crate::db::model::ByteCount; @@ -841,7 +841,6 @@ pub(in crate::db::datastore) mod test { use nexus_db_model::PhysicalDiskKind; use nexus_db_model::PhysicalDiskPolicy; use nexus_db_model::PhysicalDiskState; - use nexus_test_utils::db::test_setup_database; use nexus_types::identity::Asset; use omicron_common::api::external; use omicron_test_utils::dev; @@ -857,8 +856,8 @@ pub(in crate::db::datastore) mod test { #[tokio::test] async fn upsert_sled_updates_hardware() { let logctx = dev::test_setup_log("upsert_sled_updates_hardware"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let mut sled_update = test_new_sled_update(); let (observed_sled, _) = @@ -908,7 +907,7 @@ pub(in crate::db::datastore) mod test { ); assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -917,8 +916,8 @@ pub(in crate::db::datastore) mod test { let logctx = dev::test_setup_log( "upsert_sled_updates_fails_with_stale_sled_agent_gen", ); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore 
= db.datastore(); let mut sled_update = test_new_sled_update(); let (observed_sled, _) = @@ -972,7 +971,7 @@ pub(in crate::db::datastore) mod test { assert_eq!(observed_sled.reservoir_size, sled_update.reservoir_size); assert_eq!(observed_sled.sled_agent_gen, sled_update.sled_agent_gen); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -980,8 +979,8 @@ pub(in crate::db::datastore) mod test { async fn upsert_sled_doesnt_update_decommissioned() { let logctx = dev::test_setup_log("upsert_sled_doesnt_update_decommissioned"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let mut sled_update = test_new_sled_update(); let (observed_sled, _) = @@ -1050,7 +1049,7 @@ pub(in crate::db::datastore) mod test { "reservoir_size should not have changed" ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1059,8 +1058,8 @@ pub(in crate::db::datastore) mod test { async fn sled_reservation_create_non_provisionable() { let logctx = dev::test_setup_log("sled_reservation_create_non_provisionable"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Define some sleds that resources cannot be provisioned on. let (non_provisionable_sled, _) = @@ -1137,7 +1136,7 @@ pub(in crate::db::datastore) mod test { .unwrap(); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1164,9 +1163,8 @@ pub(in crate::db::datastore) mod test { async fn test_sled_expungement_also_expunges_disks() { let logctx = dev::test_setup_log("test_sled_expungement_also_expunges_disks"); - let mut db = test_setup_database(&logctx.log).await; - - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Set up a sled to test against. let (sled, _) = @@ -1262,7 +1260,7 @@ pub(in crate::db::datastore) mod test { lookup_physical_disk(&datastore, disk2.id()).await.disk_state ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1270,9 +1268,8 @@ pub(in crate::db::datastore) mod test { async fn test_sled_transitions() { // Test valid and invalid state and policy transitions. let logctx = dev::test_setup_log("test_sled_transitions"); - let mut db = test_setup_database(&logctx.log).await; - - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // This test generates all possible sets of transitions. Below, we list // the before and after predicates for valid transitions. 
@@ -1386,7 +1383,7 @@ pub(in crate::db::datastore) mod test { .unwrap(); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1506,8 +1503,8 @@ pub(in crate::db::datastore) mod test { #[tokio::test] async fn sled_list_batch() { let logctx = dev::test_setup_log("sled_list_batch"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let size = usize::try_from(2 * SQL_BATCH_SIZE.get()).unwrap(); let mut new_sleds = Vec::with_capacity(size); @@ -1548,7 +1545,7 @@ pub(in crate::db::datastore) mod test { assert_eq!(expected_ids, found_ids); assert_eq!(found_ids.len(), size); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 61335ccab4..b332c57798 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -1619,9 +1619,8 @@ async fn do_switch_port_settings_delete( #[cfg(test)] mod test { - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::UpdatePrecondition; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params::{ BgpAnnounceSetCreate, BgpConfigCreate, BgpPeerConfig, SwitchPortConfigCreate, SwitchPortGeometry, SwitchPortSettingsCreate, @@ -1637,8 +1636,8 @@ mod test { #[tokio::test] async fn test_bgp_boundary_switches() { let logctx = dev::test_setup_log("test_bgp_boundary_switches"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let rack_id: Uuid = nexus_test_utils::RACK_UUID.parse().expect("parse uuid"); @@ -1738,7 +1737,7 @@ mod test { assert_eq!(uplink_ports.len(), 1); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/test_utils.rs b/nexus/db-queries/src/db/datastore/test_utils.rs index 4678e07f47..75d8833873 100644 --- a/nexus/db-queries/src/db/datastore/test_utils.rs +++ b/nexus/db-queries/src/db/datastore/test_utils.rs @@ -13,7 +13,6 @@ use anyhow::bail; use anyhow::ensure; use anyhow::Context; use anyhow::Result; -use dropshot::test_util::LogContext; use futures::future::try_join_all; use nexus_db_model::SledState; use nexus_types::external_api::views::SledPolicy; @@ -21,16 +20,17 @@ use nexus_types::external_api::views::SledProvisionPolicy; use omicron_test_utils::dev::db::CockroachInstance; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::SledUuid; +use slog::Logger; use std::sync::Arc; use strum::EnumCount; use uuid::Uuid; pub(crate) async fn datastore_test( - logctx: &LogContext, + log: &Logger, db: &CockroachInstance, ) -> (OpContext, Arc<DataStore>) { let rack_id = Uuid::parse_str(nexus_test_utils::RACK_UUID).unwrap(); - super::pub_test_utils::datastore_test(logctx, db, rack_id).await + super::pub_test_utils::datastore_test(log, db, rack_id).await } /// Denotes a specific way in which a sled is ineligible.
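The hunks above, and those that follow for the remaining datastore modules, repeat one mechanical migration. As a minimal sketch, assuming the omicron test crates and the `TestDatabase` helper introduced earlier in this patch (the test name here is hypothetical), a converted test now looks like this; the key change is that teardown is an explicit `.await` that joins qorb's worker tasks before the tokio runtime starts shutting down:

```rust
use crate::db::datastore::pub_test_utils::TestDatabase;
use omicron_test_utils::dev;

#[tokio::test]
async fn example_migrated_test() {
    let logctx = dev::test_setup_log("example_migrated_test");

    // Previously (sketch): CockroachDB, the pool, and the datastore were
    // created separately, and `db.cleanup()` stopped CockroachDB without
    // joining qorb's background tasks:
    //
    //   let mut db = test_setup_database(&logctx.log).await;
    //   let (opctx, datastore) = datastore_test(&logctx, &db).await;
    //   ...
    //   db.cleanup().await.unwrap();
    //
    // Now a single handle owns all three, and accessors lend them out.
    let db = TestDatabase::new_with_datastore(&logctx.log).await;
    let (opctx, datastore) = (db.opctx(), db.datastore());

    // ... test body using `opctx` and `datastore` ...
    let _ = (opctx, datastore);

    // Explicitly stop qorb's tasks, then tear down CockroachDB.
    db.terminate().await;
    logctx.cleanup_successful();
}
```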
diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index e838d38d37..a72c032125 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -326,12 +326,11 @@ impl DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::lookup::LookupPath; use nexus_db_model::Instance; use nexus_db_model::Project; use nexus_db_model::SiloQuotasUpdate; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::silo::DEFAULT_SILO_ID; use omicron_common::api::external::IdentityMetadataCreateParams; @@ -469,8 +468,8 @@ mod test { #[tokio::test] async fn test_instance_create_and_delete() { let logctx = dev::test_setup_log("test_instance_create_and_delete"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -531,7 +530,7 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -539,8 +538,8 @@ mod test { async fn test_instance_create_and_delete_twice() { let logctx = dev::test_setup_log("test_instance_create_and_delete_twice"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -644,15 +643,15 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn test_storage_create_and_delete() { let logctx = dev::test_setup_log("test_storage_create_and_delete"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -699,7 +698,7 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -707,8 +706,8 @@ mod test { async fn test_storage_create_and_delete_twice() { let logctx = dev::test_setup_log("test_storage_create_and_delete_twice"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let test_data = setup_collections(&datastore, &opctx).await; let ids = test_data.ids(); @@ -797,7 +796,7 @@ mod test { verify_collection_usage(&datastore, &opctx, id, 0, 0, 0).await; } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/vmm.rs 
b/nexus/db-queries/src/db/datastore/vmm.rs index 77b8069a36..e578bb1696 100644 --- a/nexus/db-queries/src/db/datastore/vmm.rs +++ b/nexus/db-queries/src/db/datastore/vmm.rs @@ -441,12 +441,11 @@ impl DataStore { mod tests { use super::*; use crate::db; - use crate::db::datastore::test_utils::datastore_test; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::Generation; use crate::db::model::Migration; use crate::db::model::VmmRuntimeState; use crate::db::model::VmmState; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::internal::nexus; use omicron_test_utils::dev; use omicron_uuid_kinds::InstanceUuid; @@ -456,8 +455,8 @@ mod tests { // Setup let logctx = dev::test_setup_log("test_vmm_and_migration_update_runtime"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let instance_id = InstanceUuid::from_untyped_uuid(Uuid::new_v4()); let vmm1 = datastore @@ -724,7 +723,7 @@ mod tests { ); // Clean up. - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index 3bd0ef41ed..93ba737eb5 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -2780,8 +2780,7 @@ impl DataStore { mod tests { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; use sled_agent_client::types::CrucibleOpts; @@ -2792,13 +2791,13 @@ mod tests { let logctx = dev::test_setup_log("test_deserialize_old_crucible_resources"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let datastore = db.datastore(); // Start with a fake volume, doesn't matter if it's empty let volume_id = Uuid::new_v4(); - let _volume = db_datastore + let _volume = datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -2819,8 +2818,7 @@ mod tests { { use db::schema::volume::dsl; - let conn = - db_datastore.pool_connection_unauthorized().await.unwrap(); + let conn = datastore.pool_connection_unauthorized().await.unwrap(); let resources_to_clean_up = r#"{ "V1": { @@ -2867,14 +2865,14 @@ mod tests { // Soft delete the volume - let cr = db_datastore.soft_delete_volume(volume_id).await.unwrap(); + let cr = datastore.soft_delete_volume(volume_id).await.unwrap(); // Assert the contents of the returned CrucibleResources let datasets_and_regions = - db_datastore.regions_to_delete(&cr).await.unwrap(); + datastore.regions_to_delete(&cr).await.unwrap(); let datasets_and_snapshots = - db_datastore.snapshots_to_delete(&cr).await.unwrap(); + datastore.snapshots_to_delete(&cr).await.unwrap(); assert!(datasets_and_regions.is_empty()); assert_eq!(datasets_and_snapshots.len(), 1); @@ -2887,7 +2885,7 @@ mod tests { ); assert_eq!(region_snapshot.deleting, false); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2895,8 +2893,8 @@ mod tests { async fn test_volume_replace_region() { let logctx = 
dev::test_setup_log("test_volume_replace_region"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let datastore = db.datastore(); // Insert four Region records (three, plus one additionally allocated) @@ -2911,7 +2909,7 @@ mod tests { ]; { - let conn = db_datastore.pool_connection_for_tests().await.unwrap(); + let conn = datastore.pool_connection_for_tests().await.unwrap(); for i in 0..4 { let (_, volume_id) = region_and_volume_ids[i]; @@ -2937,7 +2935,7 @@ mod tests { } } - let _volume = db_datastore + let _volume = datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -2977,7 +2975,7 @@ mod tests { let target = region_and_volume_ids[0]; let replacement = region_and_volume_ids[3]; - let volume_replace_region_result = db_datastore + let volume_replace_region_result = datastore .volume_replace_region( /* target */ db::datastore::VolumeReplacementParams { @@ -3002,7 +3000,7 @@ mod tests { assert_eq!(volume_replace_region_result, VolumeReplaceResult::Done); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3039,7 +3037,7 @@ mod tests { ); // Now undo the replacement. Note volume ID is not swapped. - let volume_replace_region_result = db_datastore + let volume_replace_region_result = datastore .volume_replace_region( /* target */ db::datastore::VolumeReplacementParams { @@ -3064,7 +3062,7 @@ mod tests { assert_eq!(volume_replace_region_result, VolumeReplaceResult::Done); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3100,7 +3098,7 @@ mod tests { }, ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3108,8 +3106,8 @@ mod tests { async fn test_volume_replace_snapshot() { let logctx = dev::test_setup_log("test_volume_replace_snapshot"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (_opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let datastore = db.datastore(); // Insert two volumes: one with the target to replace, and one temporary // "volume to delete" that's blank. 
@@ -3118,7 +3116,7 @@ mod tests { let volume_to_delete_id = Uuid::new_v4(); let rop_id = Uuid::new_v4(); - db_datastore + datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -3177,7 +3175,7 @@ mod tests { .await .unwrap(); - db_datastore + datastore .volume_create(nexus_db_model::Volume::new( volume_to_delete_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -3193,7 +3191,7 @@ mod tests { // Do the replacement - let volume_replace_snapshot_result = db_datastore + let volume_replace_snapshot_result = datastore .volume_replace_snapshot( VolumeWithTarget(volume_id), ExistingTarget("[fd00:1122:3344:104::1]:400".parse().unwrap()), @@ -3210,7 +3208,7 @@ mod tests { // Ensure the shape of the resulting VCRs let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3270,7 +3268,7 @@ mod tests { ); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore + datastore .volume_get(volume_to_delete_id) .await .unwrap() @@ -3311,7 +3309,7 @@ mod tests { // Now undo the replacement. Note volume ID is not swapped. - let volume_replace_snapshot_result = db_datastore + let volume_replace_snapshot_result = datastore .volume_replace_snapshot( VolumeWithTarget(volume_id), ExistingTarget("[fd55:1122:3344:101::1]:111".parse().unwrap()), @@ -3326,7 +3324,7 @@ mod tests { assert_eq!(volume_replace_snapshot_result, VolumeReplaceResult::Done,); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore.volume_get(volume_id).await.unwrap().unwrap().data(), + datastore.volume_get(volume_id).await.unwrap().unwrap().data(), ) .unwrap(); @@ -3387,7 +3385,7 @@ mod tests { ); let vcr: VolumeConstructionRequest = serde_json::from_str( - db_datastore + datastore .volume_get(volume_to_delete_id) .await .unwrap() @@ -3426,7 +3424,7 @@ mod tests { }, ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3435,14 +3433,14 @@ mod tests { let logctx = dev::test_setup_log("test_find_volumes_referencing_socket_addr"); let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let (opctx, db_datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let volume_id = Uuid::new_v4(); // case where the needle is found - db_datastore + datastore .volume_create(nexus_db_model::Volume::new( volume_id, serde_json::to_string(&VolumeConstructionRequest::Volume { @@ -3479,7 +3477,7 @@ mod tests { .await .unwrap(); - let volumes = db_datastore + let volumes = datastore .find_volumes_referencing_socket_addr( &opctx, "[fd00:1122:3344:104::1]:400".parse().unwrap(), @@ -3492,7 +3490,7 @@ mod tests { // case where the needle is missing - let volumes = db_datastore + let volumes = datastore .find_volumes_referencing_socket_addr( &opctx, "[fd55:1122:3344:104::1]:400".parse().unwrap(), @@ -3502,7 +3500,7 @@ mod tests { assert!(volumes.is_empty()); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/datastore/volume_repair.rs b/nexus/db-queries/src/db/datastore/volume_repair.rs index c4e9f8b090..115244f347 100644 --- a/nexus/db-queries/src/db/datastore/volume_repair.rs +++ b/nexus/db-queries/src/db/datastore/volume_repair.rs @@ -100,15 +100,14 @@ impl 
DataStore { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; #[tokio::test] async fn volume_lock_conflict_error_returned() { let logctx = dev::test_setup_log("volume_lock_conflict_error_returned"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let lock_1 = Uuid::new_v4(); let lock_2 = Uuid::new_v4(); @@ -123,7 +122,7 @@ mod test { assert!(matches!(err, Error::Conflict { .. })); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 473efb2575..e3bd33e0a4 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++ b/nexus/db-queries/src/db/datastore/vpc.rs @@ -2755,9 +2755,9 @@ impl DataStore { #[cfg(test)] mod tests { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::test::sled_baseboard_for_test; use crate::db::datastore::test::sled_system_hardware_for_test; - use crate::db::datastore::test_utils::datastore_test; use crate::db::datastore::test_utils::IneligibleSleds; use crate::db::model::Project; use crate::db::queries::vpc::MAX_VNI_SEARCH_RANGE_SIZE; @@ -2768,7 +2768,6 @@ mod tests { use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::system::SledBuilder; use nexus_reconfigurator_planning::system::SystemDescription; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintTarget; use nexus_types::deployment::BlueprintZoneConfig; @@ -2798,8 +2797,8 @@ mod tests { "test_project_create_vpc_raw_returns_none_on_vni_exhaustion", ); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a project. let project_params = params::ProjectCreate { @@ -2889,7 +2888,7 @@ mod tests { else { panic!("Expected Ok(None) when creating a VPC without any available VNIs"); }; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -2902,8 +2901,8 @@ mod tests { usdt::register_probes().unwrap(); let logctx = dev::test_setup_log("test_project_create_vpc_retries"); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Create a project. 
let project_params = params::ProjectCreate { @@ -2999,7 +2998,7 @@ mod tests { } Err(e) => panic!("Unexpected error when inserting VPC: {e}"), }; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3047,8 +3046,8 @@ mod tests { let logctx = dev::test_setup_log( "test_vpc_resolve_to_sleds_uses_current_target_blueprint", ); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Set up our fake system with 5 sleds. let rack_id = Uuid::new_v4(); @@ -3284,7 +3283,7 @@ mod tests { ) .await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3409,8 +3408,8 @@ mod tests { let logctx = dev::test_setup_log("test_vpc_system_router_sync_to_subnets"); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (_, authz_vpc, db_vpc, _, db_router) = create_initial_vpc(log, &opctx, &datastore).await; @@ -3544,7 +3543,7 @@ mod tests { ) .await; - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -3636,8 +3635,8 @@ mod tests { let logctx = dev::test_setup_log("test_vpc_router_rule_instance_resolve"); let log = &logctx.log; - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let (authz_project, authz_vpc, db_vpc, authz_router, _) = create_initial_vpc(log, &opctx, &datastore).await; @@ -3773,7 +3772,7 @@ mod tests { _ => false, })); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/explain.rs b/nexus/db-queries/src/db/explain.rs index 52844c204f..284e96bc6e 100644 --- a/nexus/db-queries/src/db/explain.rs +++ b/nexus/db-queries/src/db/explain.rs @@ -94,10 +94,10 @@ mod test { use super::*; use crate::db; + use crate::db::datastore::pub_test_utils::TestDatabase; use async_bb8_diesel::AsyncSimpleConnection; use diesel::SelectableHelper; use expectorate::assert_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use uuid::Uuid; @@ -142,9 +142,8 @@ mod test { #[tokio::test] async fn test_explain_async() { let logctx = dev::test_setup_log("test_explain_async"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); create_schema(&pool).await; @@ -158,7 +157,7 @@ mod test { .unwrap(); assert_contents("tests/output/test-explain-output", &explanation); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -167,9 +166,8 @@ mod test { #[tokio::test] async fn test_explain_full_table_scan() { let logctx = dev::test_setup_log("test_explain_full_table_scan"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = 
TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); create_schema(&pool).await; @@ -187,7 +185,7 @@ mod test { "Expected [{}] to contain 'FULL SCAN'", explanation ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index bc1368820c..43cd2a073f 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -958,24 +958,16 @@ mod test { use super::Instance; use super::LookupPath; use super::Project; - use crate::context::OpContext; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::model::Name; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; - use std::sync::Arc; /* This is a smoke test that things basically appear to work. */ #[tokio::test] async fn test_lookup() { let logctx = dev::test_setup_log("test_lookup"); - let mut db = test_setup_database(&logctx.log).await; - let (_, datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; - let opctx = OpContext::for_tests( - logctx.log.new(o!()), - Arc::clone(&datastore) as Arc, - ); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let project_name: Name = Name("my-project".parse().unwrap()); let instance_name: Name = Name("my-instance".parse().unwrap()); @@ -999,7 +991,7 @@ mod test { Project::PrimaryKey(_, p) if *p == project_id)); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/pagination.rs b/nexus/db-queries/src/db/pagination.rs index 4872e18136..a16591ad6c 100644 --- a/nexus/db-queries/src/db/pagination.rs +++ b/nexus/db-queries/src/db/pagination.rs @@ -343,11 +343,11 @@ mod test { use super::*; use crate::db; + use crate::db::datastore::pub_test_utils::TestDatabase; use async_bb8_diesel::{AsyncRunQueryDsl, AsyncSimpleConnection}; use diesel::JoinOnDsl; use diesel::SelectableHelper; use dropshot::PaginationOrder; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::DataPageParams; use omicron_test_utils::dev; use std::num::NonZeroU32; @@ -489,9 +489,8 @@ mod test { async fn test_paginated_single_column_ascending() { let logctx = dev::test_setup_log("test_paginated_single_column_ascending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -516,7 +515,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(2, 2), (3, 3)]); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -524,9 +523,8 @@ mod test { async fn test_paginated_single_column_descending() { let logctx = dev::test_setup_log("test_paginated_single_column_descending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -551,7 +549,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(2, 2), (1, 1)]); - let _ = db.cleanup().await; + 
db.terminate().await; logctx.cleanup_successful(); } @@ -559,9 +557,8 @@ mod test { async fn test_paginated_multicolumn_ascending() { let logctx = dev::test_setup_log("test_paginated_multicolumn_ascending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -605,7 +602,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(1, 1), (2, 1), (3, 1), (1, 2), (2, 3)]); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -613,9 +610,8 @@ mod test { async fn test_paginated_multicolumn_descending() { let logctx = dev::test_setup_log("test_paginated_multicolumn_descending"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_users::dsl; @@ -659,7 +655,7 @@ mod test { let observed = execute_query(&pool, query).await; assert_eq!(observed, vec![(2, 3), (1, 2), (3, 1), (2, 1), (1, 1)]); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } @@ -669,9 +665,8 @@ mod test { let logctx = dev::test_setup_log("test_paginated_multicolumn_works_with_joins"); - let mut db = test_setup_database(&logctx.log).await; - let cfg = db::Config { url: db.pg_config().clone() }; - let pool = db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); use schema::test_phone_numbers::dsl as phone_numbers_dsl; use schema::test_users::dsl; @@ -760,7 +755,7 @@ mod test { &[((2, 3), 42), ((3, 1), 50), ((3, 1), 51), ((3, 1), 52)] ); - let _ = db.cleanup().await; + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/db/pool.rs b/nexus/db-queries/src/db/pool.rs index ea669a419e..c42158a64f 100644 --- a/nexus/db-queries/src/db/pool.rs +++ b/nexus/db-queries/src/db/pool.rs @@ -28,6 +28,8 @@ type QorbPool = qorb::pool::Pool; /// Expected to be used as the primary interface to the database. 
pub struct Pool { inner: QorbPool, + + terminated: std::sync::atomic::AtomicBool, } // Provides an alternative to the DNS resolver for cases where we want to @@ -89,7 +91,10 @@ impl Pool { let connector = make_postgres_connector(log); let policy = Policy::default(); - Pool { inner: qorb::pool::Pool::new(resolver, connector, policy) } + Pool { + inner: qorb::pool::Pool::new(resolver, connector, policy), + terminated: std::sync::atomic::AtomicBool::new(false), + } } /// Creates a new qorb-backed connection pool to a single instance of the @@ -107,7 +112,10 @@ impl Pool { let connector = make_postgres_connector(log); let policy = Policy::default(); - Pool { inner: qorb::pool::Pool::new(resolver, connector, policy) } + Pool { + inner: qorb::pool::Pool::new(resolver, connector, policy), + terminated: std::sync::atomic::AtomicBool::new(false), + } } /// Creates a new qorb-backed connection pool which returns an error @@ -131,7 +139,10 @@ impl Pool { claim_timeout: tokio::time::Duration::from_millis(1), ..Default::default() }; - Pool { inner: qorb::pool::Pool::new(resolver, connector, policy) } + Pool { + inner: qorb::pool::Pool::new(resolver, connector, policy), + terminated: std::sync::atomic::AtomicBool::new(false), + } } /// Returns a connection from the pool @@ -140,4 +151,36 @@ impl Pool { ) -> anyhow::Result<qorb::claim::Handle<QorbConnection>> { Ok(self.inner.claim().await?) } + + /// Stops the qorb background tasks, and causes all future claims to fail + pub async fn terminate(&self) { + let _termination_result = self.inner.terminate().await; + self.terminated.store(true, std::sync::atomic::Ordering::SeqCst); + } +} + +impl Drop for Pool { + fn drop(&mut self) { + // Dropping the pool means that qorb may have background tasks, which + // may send requests even after this "drop" point. + // + // When we drop the qorb pool, we'll attempt to cancel those tasks, but + // it's possible for these tasks to keep nudging slightly forward if + // we're using a multi-threaded async executor. + // + // With this check, we'll reliably panic (rather than flake) if the pool + // is dropped without terminating these worker tasks. + if !self.terminated.load(std::sync::atomic::Ordering::SeqCst) { + // If we're already panicking, don't panic again. + // Doing so can ruin test handlers by aborting the process. + // + // Instead, just drop a message to stderr and carry on.
+ let msg = "Pool dropped without invoking `terminate`"; + if std::thread::panicking() { + eprintln!("{msg}"); + } else { + panic!("{msg}"); + } + } + } } diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 2caab9ee46..d1028fbdb6 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -865,8 +865,7 @@ impl RunQueryDsl for NextExternalIp {} #[cfg(test)] mod tests { use crate::authz; - use crate::context::OpContext; - use crate::db::datastore::DataStore; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::SERVICE_IP_POOL_NAME; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -882,7 +881,6 @@ mod tests { use nexus_db_model::IpPoolResource; use nexus_db_model::IpPoolResourceType; use nexus_sled_agent_shared::inventory::ZoneKind; - use nexus_test_utils::db::test_setup_database; use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::deployment::OmicronZoneExternalIp; use nexus_types::deployment::OmicronZoneExternalSnatIp; @@ -893,41 +891,25 @@ mod tests { use omicron_common::api::external::Error; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_test_utils::dev; - use omicron_test_utils::dev::db::CockroachInstance; use omicron_uuid_kinds::ExternalIpUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use omicron_uuid_kinds::OmicronZoneUuid; use std::net::IpAddr; use std::net::Ipv4Addr; - use std::sync::Arc; use uuid::Uuid; struct TestContext { logctx: LogContext, - opctx: OpContext, - db: CockroachInstance, - db_datastore: Arc, + db: TestDatabase, } impl TestContext { async fn new(test_name: &str) -> Self { let logctx = dev::test_setup_log(test_name); let log = logctx.log.new(o!()); - let db = test_setup_database(&log).await; - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let db_datastore = Arc::new( - crate::db::DataStore::new(&logctx.log, Arc::clone(&pool), None) - .await - .unwrap(), - ); - let opctx = - OpContext::for_tests(log.new(o!()), db_datastore.clone()); - Self { logctx, opctx, db, db_datastore } + let db = TestDatabase::new_with_datastore(&log).await; + Self { logctx, db } } /// Create pool, associate with current silo @@ -942,26 +924,28 @@ mod tests { description: format!("ip pool {}", name), }); - self.db_datastore - .ip_pool_create(&self.opctx, pool.clone()) + self.db + .datastore() + .ip_pool_create(self.db.opctx(), pool.clone()) .await .expect("Failed to create IP pool"); - let silo_id = self.opctx.authn.silo_required().unwrap().id(); + let silo_id = self.db.opctx().authn.silo_required().unwrap().id(); let association = IpPoolResource { resource_id: silo_id, resource_type: IpPoolResourceType::Silo, ip_pool_id: pool.id(), is_default, }; - self.db_datastore - .ip_pool_link_silo(&self.opctx, association) + self.db + .datastore() + .ip_pool_link_silo(self.db.opctx(), association) .await .expect("Failed to associate IP pool with silo"); self.initialize_ip_pool(name, range).await; - LookupPath::new(&self.opctx, &self.db_datastore) + LookupPath::new(self.db.opctx(), &self.db.datastore()) .ip_pool_id(pool.id()) .lookup_for(authz::Action::Read) .await @@ -973,8 +957,9 @@ mod tests { // Find the target IP pool use crate::db::schema::ip_pool::dsl as ip_pool_dsl; let conn 
= self - .db_datastore - .pool_connection_authorized(&self.opctx) + .db + .datastore() + .pool_connection_authorized(self.db.opctx()) .await .unwrap(); let pool = ip_pool_dsl::ip_pool @@ -993,8 +978,9 @@ mod tests { .values(pool_range) .execute_async( &*self - .db_datastore - .pool_connection_authorized(&self.opctx) + .db + .datastore() + .pool_connection_authorized(self.db.opctx()) .await .unwrap(), ) @@ -1021,8 +1007,9 @@ mod tests { }); let conn = self - .db_datastore - .pool_connection_authorized(&self.opctx) + .db + .datastore() + .pool_connection_authorized(self.db.opctx()) .await .unwrap(); @@ -1038,15 +1025,16 @@ mod tests { async fn default_pool_id(&self) -> Uuid { let (.., pool) = self - .db_datastore - .ip_pools_fetch_default(&self.opctx) + .db + .datastore() + .ip_pools_fetch_default(self.db.opctx()) .await .expect("Failed to lookup default ip pool"); pool.identity.id } - async fn success(mut self) { - self.db.cleanup().await.unwrap(); + async fn success(self) { + self.db.terminate().await; self.logctx.cleanup_successful(); } } @@ -1068,9 +1056,10 @@ mod tests { let id = Uuid::new_v4(); let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1085,9 +1074,10 @@ mod tests { // The next allocation should fail, due to IP exhaustion let instance_id = InstanceUuid::new_v4(); let err = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1122,9 +1112,10 @@ mod tests { // the only address in the pool. let instance_id = context.create_instance("for-eph").await; let ephemeral_ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1141,9 +1132,10 @@ mod tests { // nor any SNAT IPs. let instance_id = context.create_instance("for-snat").await; let res = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1164,9 +1156,10 @@ mod tests { ); let res = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1218,9 +1211,10 @@ mod tests { for (expected_ip, expected_first_port) in external_ips.clone().take(2) { let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1237,8 +1231,9 @@ mod tests { // Release the first context - .db_datastore - .deallocate_external_ip(&context.opctx, ips[0].id) + .db + .datastore() + .deallocate_external_ip(context.db.opctx(), ips[0].id) .await .expect("Failed to release the first external IP address"); @@ -1246,9 +1241,10 @@ mod tests { // released. let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1273,9 +1269,10 @@ mod tests { // from the original loop. 
let instance_id = InstanceUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1310,9 +1307,10 @@ mod tests { let pool_name = None; let ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + context.db.opctx(), id, instance_id, pool_name, @@ -1358,9 +1356,10 @@ mod tests { // service. let service_id = OmicronZoneUuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1375,9 +1374,10 @@ mod tests { // Try allocating the same service IP again. let ip_again = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1391,9 +1391,9 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different UUID. let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1411,9 +1411,9 @@ mod tests { // Try allocating the same service IP once more, but do it with a // different input address. let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1437,9 +1437,9 @@ mod tests { .unwrap(), }); let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), service_id, ZoneKind::BoundaryNtp, ip_10_0_0_3_snat_0, @@ -1464,9 +1464,10 @@ mod tests { }); let snat_service_id = OmicronZoneUuid::new_v4(); let snat_ip = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1485,9 +1486,10 @@ mod tests { // Try allocating the same service IP again. let snat_ip_again = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1513,9 +1515,9 @@ mod tests { .unwrap(), }); let err = context - .db_datastore + .db.datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_49152, @@ -1552,9 +1554,10 @@ mod tests { let service_id = OmicronZoneUuid::new_v4(); let err = context - .db_datastore + .db + .datastore() .external_ip_allocate_omicron_zone( - &context.opctx, + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_5, @@ -1586,9 +1589,10 @@ mod tests { let instance_id = InstanceUuid::new_v4(); let id = Uuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1603,9 +1607,10 @@ mod tests { // Create a new IP, with the _same_ ID, and ensure we get back the same // value. 
let new_ip = context - .db_datastore + .db + .datastore() .allocate_instance_snat_ip( - &context.opctx, + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1656,9 +1661,10 @@ mod tests { let id = Uuid::new_v4(); let ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + context.db.opctx(), id, instance_id, Some(p1), @@ -1701,9 +1707,10 @@ mod tests { let instance_id = context.create_instance(&format!("o{octet}")).await; let ip = context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1.clone()), @@ -1723,9 +1730,10 @@ mod tests { // Allocating another address should _fail_, and not use the first pool. let instance_id = context.create_instance("final").await; context - .db_datastore + .db + .datastore() .allocate_instance_ephemeral_ip( - &context.opctx, + context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1), diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index d664f7459a..39c799d223 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -1797,6 +1797,7 @@ mod tests { use super::NUM_INITIAL_RESERVED_IP_ADDRESSES; use crate::authz; use crate::context::OpContext; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::DataStore; use crate::db::identity::Resource; use crate::db::lookup::LookupPath; @@ -1811,7 +1812,6 @@ mod tests { use async_bb8_diesel::AsyncRunQueryDsl; use dropshot::test_util::LogContext; use model::NetworkInterfaceKind; - use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::params; use nexus_types::external_api::params::InstanceCreate; use nexus_types::external_api::params::InstanceNetworkInterfaceAttachment; @@ -1822,7 +1822,6 @@ mod tests { use omicron_common::api::external::InstanceCpuCount; use omicron_common::api::external::MacAddr; use omicron_test_utils::dev; - use omicron_test_utils::dev::db::CockroachInstance; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; use oxnet::Ipv4Net; @@ -1832,7 +1831,6 @@ mod tests { use std::net::IpAddr; use std::net::Ipv4Addr; use std::net::Ipv6Addr; - use std::sync::Arc; use uuid::Uuid; // Add an instance. We'll use this to verify that the instance must be @@ -1964,9 +1962,7 @@ mod tests { // Context for testing network interface queries. 
struct TestContext { logctx: LogContext, - opctx: OpContext, - db: CockroachInstance, - db_datastore: Arc<DataStore>, + db: TestDatabase, project_id: Uuid, net1: Network, net2: Network, @@ -1976,10 +1972,8 @@ mod tests { async fn new(test_name: &str, n_subnets: u8) -> Self { let logctx = dev::test_setup_log(test_name); let log = logctx.log.new(o!()); - let db = test_setup_database(&log).await; - let (opctx, db_datastore) = - crate::db::datastore::test_utils::datastore_test(&logctx, &db) - .await; + let db = TestDatabase::new_with_datastore(&log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); let authz_silo = opctx.authn.silo_required().unwrap(); @@ -1994,11 +1988,11 @@ mod tests { }, ); let (.., project) = - db_datastore.project_create(&opctx, project).await.unwrap(); + datastore.project_create(&opctx, project).await.unwrap(); use crate::db::schema::vpc_subnet::dsl::vpc_subnet; let conn = - db_datastore.pool_connection_authorized(&opctx).await.unwrap(); + datastore.pool_connection_authorized(&opctx).await.unwrap(); let net1 = Network::new(n_subnets); let net2 = Network::new(n_subnets); for subnet in net1.subnets.iter().chain(net2.subnets.iter()) { @@ -2009,29 +2003,29 @@ mod tests { .unwrap(); } drop(conn); - Self { - logctx, - opctx, - db, - db_datastore, - project_id: project.id(), - net1, - net2, - } + Self { logctx, db, project_id: project.id(), net1, net2 } + } + + fn opctx(&self) -> &OpContext { + self.db.opctx() + } + + fn datastore(&self) -> &DataStore { + self.db.datastore() } - async fn success(mut self) { - self.db.cleanup().await.unwrap(); + async fn success(self) { + self.db.terminate().await; self.logctx.cleanup_successful(); } async fn create_stopped_instance(&self) -> Instance { instance_set_state( - &self.db_datastore, + self.datastore(), create_instance( - &self.opctx, + self.opctx(), self.project_id, - &self.db_datastore, + self.datastore(), ) .await, InstanceState::NoVmm, @@ -2041,11 +2035,11 @@ mod tests { async fn create_running_instance(&self) -> Instance { instance_set_state( - &self.db_datastore, + self.datastore(), create_instance( - &self.opctx, + self.opctx(), self.project_id, - &self.db_datastore, + self.datastore(), ) .await, InstanceState::Vmm, @@ -2078,17 +2072,17 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); // We should be able to delete twice, and be told that the first delete // modified the row and the second did not. let first_deleted = context - .db_datastore + .datastore() .service_delete_network_interface( - &context.opctx, + context.opctx(), service_id, inserted_interface.id(), ) @@ -2097,9 +2091,9 @@ mod tests { assert!(first_deleted, "first delete removed interface"); let second_deleted = context - .db_datastore + .datastore() .service_delete_network_interface( - &context.opctx, + context.opctx(), service_id, inserted_interface.id(), ) @@ -2110,9 +2104,9 @@ mod tests { // Attempting to delete a nonexistent interface should fail.
let bogus_id = Uuid::new_v4(); let err = context - .db_datastore + .datastore() .service_delete_network_interface( - &context.opctx, + context.opctx(), service_id, bogus_id, ) @@ -2147,8 +2141,8 @@ mod tests { Some(requested_ip), ) .unwrap(); - let err = context.db_datastore - .instance_create_network_interface_raw(&context.opctx, interface.clone()) + let err = context.datastore() + .instance_create_network_interface_raw(context.opctx(), interface.clone()) .await .expect_err("Should not be able to create an interface for a running instance"); assert!( @@ -2177,9 +2171,9 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2208,8 +2202,8 @@ mod tests { None, ) .unwrap(); - let err = context.db_datastore - .instance_create_network_interface_raw(&context.opctx, interface.clone()) + let err = context.datastore() + .instance_create_network_interface_raw(context.opctx(), interface.clone()) .await .expect_err("Should not be able to insert an interface for an instance that doesn't exist"); assert!( @@ -2247,9 +2241,9 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2291,8 +2285,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); @@ -2310,8 +2304,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!(result, Err(InsertError::IpAddressNotAvailable(_))), @@ -2347,8 +2341,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); assert_eq!(inserted_interface.mac.0, mac); @@ -2387,8 +2381,11 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw( + context.opctx(), + interface, + ) .await .expect("Failed to insert interface"); assert_eq!(*inserted_interface.slot, slot); @@ -2424,8 +2421,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + .service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); assert_eq!(inserted_interface.mac.0, mac); @@ -2447,8 +2444,11 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, new_interface) + .datastore() + .service_create_network_interface_raw( + context.opctx(), + new_interface, + ) .await; assert!( matches!(result, Err(InsertError::MacAddressNotAvailable(_))), @@ -2500,8 +2500,8 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, interface) + .datastore() + 
.service_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); assert_eq!(*inserted_interface.slot, 0); @@ -2521,8 +2521,11 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .service_create_network_interface_raw(&context.opctx, new_interface) + .datastore() + .service_create_network_interface_raw( + context.opctx(), + new_interface, + ) .await; assert!( matches!(result, Err(InsertError::SlotNotAvailable(0))), @@ -2549,9 +2552,9 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2568,8 +2571,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!( @@ -2599,8 +2602,8 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); let interface = IncompleteNetworkInterface::new_instance( @@ -2615,8 +2618,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!(result, Err(InsertError::NonUniqueVpcSubnets)), @@ -2643,16 +2646,16 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await .expect("Failed to insert interface"); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!( @@ -2685,8 +2688,8 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await .expect("Failed to insert interface"); let expected_address = "172.30.0.5".parse().unwrap(); @@ -2703,9 +2706,9 @@ mod tests { ) .unwrap(); let result = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface, ) .await; @@ -2739,9 +2742,9 @@ mod tests { ) .unwrap(); let _ = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface, ) .await @@ -2750,9 +2753,9 @@ mod tests { // Next one should fail let instance = create_stopped_instance( - &context.opctx, + context.opctx(), context.project_id, - &context.db_datastore, + context.datastore(), ) .await; let instance_id = InstanceUuid::from_untyped_uuid(instance.id()); @@ -2768,8 +2771,8 @@ mod tests { ) .unwrap(); let result = context - .db_datastore - .instance_create_network_interface_raw(&context.opctx, interface) + .datastore() + .instance_create_network_interface_raw(context.opctx(), interface) .await; assert!( matches!(result, Err(InsertError::NoAvailableIpAddresses)), @@ -2801,9 +2804,9 @@ mod tests { ) .unwrap(); let result = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface, ) .await; @@ 
-2869,9 +2872,9 @@ mod tests { ) .unwrap(); let inserted_interface = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await @@ -2904,9 +2907,9 @@ mod tests { ) .unwrap(); let result = context - .db_datastore + .datastore() .instance_create_network_interface_raw( - &context.opctx, + context.opctx(), interface.clone(), ) .await diff --git a/nexus/db-queries/src/db/queries/next_item.rs b/nexus/db-queries/src/db/queries/next_item.rs index 970baede2c..0ec0727737 100644 --- a/nexus/db-queries/src/db/queries/next_item.rs +++ b/nexus/db-queries/src/db/queries/next_item.rs @@ -924,6 +924,7 @@ mod tests { use super::NextItem; use super::ShiftIndices; use crate::db; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync as _; use crate::db::queries::next_item::NextItemSelfJoined; use async_bb8_diesel::AsyncRunQueryDsl; @@ -937,9 +938,7 @@ mod tests { use diesel::Column; use diesel::Insertable; use diesel::SelectableHelper; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; - use std::sync::Arc; use uuid::Uuid; table! { @@ -1102,11 +1101,8 @@ mod tests { async fn test_wrapping_next_item_query() { // Setup the test database let logctx = dev::test_setup_log("test_wrapping_next_item_query"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1156,7 +1152,7 @@ mod tests { .unwrap(); assert_eq!(it.value, 2); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1165,11 +1161,8 @@ mod tests { // Setup the test database let logctx = dev::test_setup_log("test_next_item_query_is_ordered_by_indices"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1212,7 +1205,7 @@ mod tests { "The next item query should not have further items to generate", ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1255,11 +1248,8 @@ mod tests { async fn test_explain_next_item_self_joined() { // Setup the test database let logctx = dev::test_setup_log("test_explain_next_item_self_joined"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
@@ -1274,7 +1264,7 @@ mod tests { >::new_scoped(Uuid::nil(), i32::MIN, i32::MAX); let out = query.explain_async(&conn).await.unwrap(); println!("{out}"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1282,11 +1272,8 @@ mod tests { async fn test_next_item_self_joined() { // Setup the test database let logctx = dev::test_setup_log("test_next_item_self_joined"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1310,7 +1297,7 @@ mod tests { .get_result_async(&*conn) .await .expect_err("should not be able to insert after the query range is exhausted"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1319,11 +1306,8 @@ mod tests { // Setup the test database let logctx = dev::test_setup_log("test_next_item_self_joined_with_gaps"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. @@ -1364,7 +1348,7 @@ mod tests { "Should have inserted the next skipped value" ); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -1373,11 +1357,8 @@ mod tests { async fn print_next_item_query_forms() { // Setup the test database let logctx = dev::test_setup_log("print_next_item_query_forms"); - let log = logctx.log.new(o!()); - let db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); // We're going to operate on a separate table, for simplicity. 
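Taken together, the hunks above migrate every one of these tests onto the `pub_test_utils::TestDatabase` helper. As a reading aid (this sketch is not part of the patch, and the test function and its name are invented), the lifecycle those tests now share looks like:

```rust
use crate::db::datastore::pub_test_utils::TestDatabase;
use omicron_test_utils::dev;

#[tokio::test]
async fn example_test_database_lifecycle() {
    let logctx = dev::test_setup_log("example_test_database_lifecycle");

    // One constructor per level of setup exercised in this patch:
    // - new_with_pool:          CockroachDB plus a qorb pool (db.pool())
    // - new_with_raw_datastore: the above plus a DataStore (db.datastore())
    // - new_with_datastore:     the above plus a test OpContext (db.opctx())
    let db = TestDatabase::new_with_datastore(&logctx.log).await;
    let (opctx, datastore) = (db.opctx(), db.datastore());
    let _ = (opctx, datastore); // ... exercise the datastore here ...

    // Explicit, awaited teardown replaces the old `db.cleanup().await.unwrap()`:
    // qorb's worker tasks are shut down while the tokio runtime is still
    // fully alive, instead of racing runtime shutdown from a `Drop` impl.
    db.terminate().await;
    logctx.cleanup_successful();
}
```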
diff --git a/nexus/db-queries/src/db/queries/oximeter.rs b/nexus/db-queries/src/db/queries/oximeter.rs index ab2d194c3e..eee7dd5669 100644 --- a/nexus/db-queries/src/db/queries/oximeter.rs +++ b/nexus/db-queries/src/db/queries/oximeter.rs @@ -221,9 +221,9 @@ pub fn reassign_producers_query(oximeter_id: Uuid) -> TypedSqlQuery<()> { #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync; use crate::db::raw_query_builder::expectorate_query_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use std::time::Duration; use uuid::Uuid; @@ -266,10 +266,8 @@ mod test { #[tokio::test] async fn explainable_upsert_producer() { let logctx = dev::test_setup_log("explainable_upsert_producer"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let producer = internal::nexus::ProducerEndpoint { @@ -285,17 +283,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explainable_reassign_producers() { let logctx = dev::test_setup_log("explainable_reassign_producers"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let oximeter_id = Uuid::nil(); @@ -306,7 +302,7 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/queries/region_allocation.rs b/nexus/db-queries/src/db/queries/region_allocation.rs index dbf37fda2e..a531e726b3 100644 --- a/nexus/db-queries/src/db/queries/region_allocation.rs +++ b/nexus/db-queries/src/db/queries/region_allocation.rs @@ -405,10 +405,10 @@ UNION #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::datastore::REGION_REDUNDANCY_THRESHOLD; use crate::db::explain::ExplainableAsync; use crate::db::raw_query_builder::expectorate_query_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use uuid::Uuid; @@ -504,10 +504,8 @@ mod test { #[tokio::test] async fn explainable() { let logctx = dev::test_setup_log("explainable"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let volume_id = Uuid::new_v4(); @@ -546,7 +544,7 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs b/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs index 
9d2ed04c85..8d3ef320ee 100644 --- a/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs +++ b/nexus/db-queries/src/db/queries/virtual_provisioning_collection_update.rs @@ -478,9 +478,9 @@ FROM #[cfg(test)] mod test { use super::*; + use crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync; use crate::db::raw_query_builder::expectorate_query_contents; - use nexus_test_utils::db::test_setup_database; use omicron_test_utils::dev; use uuid::Uuid; @@ -565,10 +565,8 @@ mod test { #[tokio::test] async fn explain_insert_storage() { let logctx = dev::test_setup_log("explain_insert_storage"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = Uuid::nil(); @@ -587,17 +585,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explain_delete_storage() { let logctx = dev::test_setup_log("explain_delete_storage"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = Uuid::nil(); @@ -614,17 +610,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explain_insert_instance() { let logctx = dev::test_setup_log("explain_insert_instance"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = InstanceUuid::nil(); @@ -640,17 +634,15 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } #[tokio::test] async fn explain_delete_instance() { let logctx = dev::test_setup_log("explain_delete_instance"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = crate::db::Pool::new_single_host(&logctx.log, &cfg); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); let conn = pool.claim().await.unwrap(); let id = InstanceUuid::nil(); @@ -666,7 +658,7 @@ mod test { .await .expect("Failed to explain query - is it valid SQL?"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/db/queries/vpc_subnet.rs b/nexus/db-queries/src/db/queries/vpc_subnet.rs index 7e3db1dd59..17ee5103aa 100644 --- a/nexus/db-queries/src/db/queries/vpc_subnet.rs +++ b/nexus/db-queries/src/db/queries/vpc_subnet.rs @@ -288,14 +288,13 @@ impl InsertVpcSubnetError { mod test { use super::InsertVpcSubnetError; use super::InsertVpcSubnetQuery; + use 
crate::db::datastore::pub_test_utils::TestDatabase; use crate::db::explain::ExplainableAsync as _; use crate::db::model::VpcSubnet; - use nexus_test_utils::db::test_setup_database; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; use omicron_test_utils::dev; use std::convert::TryInto; - use std::sync::Arc; #[tokio::test] async fn explain_insert_query() { @@ -310,15 +309,11 @@ mod test { VpcSubnet::new(subnet_id, vpc_id, identity, ipv4_block, ipv6_block); let query = InsertVpcSubnetQuery::new(row); let logctx = dev::test_setup_log("explain_insert_query"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let conn = pool.claim().await.unwrap(); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let conn = db.pool().claim().await.unwrap(); let explain = query.explain_async(&conn).await.unwrap(); println!("{explain}"); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -356,16 +351,8 @@ mod test { // Setup the test database let logctx = dev::test_setup_log("test_insert_vpc_subnet_query"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let db_datastore = Arc::new( - crate::db::DataStore::new(&log, Arc::clone(&pool), None) - .await - .unwrap(), - ); + let db = TestDatabase::new_with_raw_datastore(&logctx.log).await; + let db_datastore = db.datastore(); // We should be able to insert anything into an empty table. assert!( @@ -551,7 +538,7 @@ mod test { "Should be able to insert new VPC Subnet with non-overlapping IP ranges" ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -624,16 +611,8 @@ mod test { // Setup the test database let logctx = dev::test_setup_log("test_insert_vpc_subnet_query_is_idempotent"); - let log = logctx.log.new(o!()); - let mut db = test_setup_database(&log).await; - let cfg = crate::db::Config { url: db.pg_config().clone() }; - let pool = - Arc::new(crate::db::Pool::new_single_host(&logctx.log, &cfg)); - let db_datastore = Arc::new( - crate::db::DataStore::new(&log, Arc::clone(&pool), None) - .await - .unwrap(), - ); + let db = TestDatabase::new_with_raw_datastore(&logctx.log).await; + let db_datastore = db.datastore(); // We should be able to insert anything into an empty table. 
let inserted = db_datastore @@ -652,7 +631,7 @@ mod test { "Must be able to insert the exact same VPC subnet more than once", ); assert_rows_eq(&inserted, &row); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/db-queries/src/policy_test/mod.rs b/nexus/db-queries/src/policy_test/mod.rs index 395a480c47..be81f58138 100644 --- a/nexus/db-queries/src/policy_test/mod.rs +++ b/nexus/db-queries/src/policy_test/mod.rs @@ -14,7 +14,7 @@ mod coverage; mod resource_builder; mod resources; -use crate::db; +use crate::db::datastore::pub_test_utils::TestDatabase; use coverage::Coverage; use futures::StreamExt; use nexus_auth::authn; @@ -22,7 +22,6 @@ use nexus_auth::authn::SiloAuthnPolicy; use nexus_auth::authn::USER_TEST_PRIVILEGED; use nexus_auth::authz; use nexus_auth::context::OpContext; -use nexus_test_utils::db::test_setup_database; use nexus_types::external_api::shared; use nexus_types::external_api::shared::FleetRole; use nexus_types::external_api::shared::SiloRole; @@ -62,9 +61,8 @@ use uuid::Uuid; #[tokio::test(flavor = "multi_thread")] async fn test_iam_roles_behavior() { let logctx = dev::test_setup_log("test_iam_roles"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - db::datastore::test_utils::datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Before we can create the resources, users, and role assignments that we // need, we must grant the "test-privileged" user privileges to fetch and @@ -173,7 +171,7 @@ async fn test_iam_roles_behavior() { &std::str::from_utf8(buffer.as_ref()).expect("non-UTF8 output"), ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -329,9 +327,8 @@ impl Write for StdoutTee { async fn test_conferred_roles() { // To start, this test looks a lot like the test above. 
let logctx = dev::test_setup_log("test_conferred_roles"); - let mut db = test_setup_database(&logctx.log).await; - let (opctx, datastore) = - db::datastore::test_utils::datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); // Before we can create the resources, users, and role assignments that we // need, we must grant the "test-privileged" user privileges to fetch and @@ -464,6 +461,6 @@ async fn test_conferred_roles() { &std::str::from_utf8(buffer.as_ref()).expect("non-UTF8 output"), ); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } diff --git a/nexus/db-queries/src/transaction_retry.rs b/nexus/db-queries/src/transaction_retry.rs index 03e0574f52..9975d1cf3b 100644 --- a/nexus/db-queries/src/transaction_retry.rs +++ b/nexus/db-queries/src/transaction_retry.rs @@ -258,8 +258,7 @@ impl OptionalError { mod test { use super::*; - use crate::db::datastore::test_utils::datastore_test; - use nexus_test_utils::db::test_setup_database; + use crate::db::datastore::pub_test_utils::TestDatabase; use omicron_test_utils::dev; use oximeter::types::FieldValue; @@ -271,8 +270,8 @@ mod test { let logctx = dev::test_setup_log( "test_transaction_rollback_produces_no_samples", ); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); @@ -294,7 +293,7 @@ mod test { .clone(); assert_eq!(samples, vec![]); - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } @@ -304,8 +303,8 @@ mod test { async fn test_transaction_retry_produces_samples() { let logctx = dev::test_setup_log("test_transaction_retry_produces_samples"); - let mut db = test_setup_database(&logctx.log).await; - let (_opctx, datastore) = datastore_test(&logctx, &db).await; + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let datastore = db.datastore(); let conn = datastore.pool_connection_for_tests().await.unwrap(); datastore @@ -355,7 +354,7 @@ mod test { ); } - db.cleanup().await.unwrap(); + db.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/metrics-producer-gc/src/lib.rs b/nexus/metrics-producer-gc/src/lib.rs index 407af2fdbd..32e2be5809 100644 --- a/nexus/metrics-producer-gc/src/lib.rs +++ b/nexus/metrics-producer-gc/src/lib.rs @@ -218,7 +218,7 @@ mod tests { let logctx = dev::test_setup_log("test_prune_expired_producers"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; // Insert an Oximeter collector let collector_info = OximeterInfo::new(&params::OximeterInfo { @@ -291,6 +291,7 @@ mod tests { assert!(pruned.successes.is_empty()); assert!(pruned.failures.is_empty()); + datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -303,7 +304,7 @@ mod tests { ); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let mut collector = httptest::Server::run(); @@ -357,6 +358,7 @@ mod tests { collector.verify_and_clear(); + datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff
--git a/nexus/src/app/background/tasks/crdb_node_id_collector.rs b/nexus/src/app/background/tasks/crdb_node_id_collector.rs index e92b013ac2..ddb068226f 100644 --- a/nexus/src/app/background/tasks/crdb_node_id_collector.rs +++ b/nexus/src/app/background/tasks/crdb_node_id_collector.rs @@ -349,7 +349,7 @@ mod tests { let logctx = dev::test_setup_log("test_activate_fails_if_no_blueprint"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let (_tx_blueprint, rx_blueprint) = watch::channel(None); let mut collector = @@ -358,6 +358,7 @@ mod tests { assert_eq!(result, json!({"error": "no blueprint"})); + datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -380,7 +381,7 @@ mod tests { dev::test_setup_log("test_activate_with_no_unknown_node_ids"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let blueprint = BlueprintBuilder::build_empty_with_sleds( iter::once(SledUuid::new_v4()), @@ -434,6 +435,7 @@ mod tests { .await; assert_eq!(result, json!({"nsuccess": crdb_zones.len()})); + datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -444,7 +446,7 @@ mod tests { let logctx = dev::test_setup_log("test_activate_with_unknown_node_ids"); let mut db = test_setup_database(&logctx.log).await; let (opctx, datastore) = - datastore_test(&logctx, &db, Uuid::new_v4()).await; + datastore_test(&logctx.log, &db, Uuid::new_v4()).await; let blueprint = BlueprintBuilder::build_empty_with_sleds( iter::once(SledUuid::new_v4()), @@ -573,6 +575,7 @@ mod tests { assert!(crdb_err3.contains(&crdb_zone_id3)); assert!(crdb_err3.contains(&crdb_zone_id4)); + datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/src/app/background/tasks/saga_recovery.rs b/nexus/src/app/background/tasks/saga_recovery.rs index 42069ac4ed..bee8884fd4 100644 --- a/nexus/src/app/background/tasks/saga_recovery.rs +++ b/nexus/src/app/background/tasks/saga_recovery.rs @@ -743,6 +743,7 @@ mod test { drop(task); let sec_client = Arc::try_unwrap(sec_client).unwrap(); sec_client.shutdown().await; + db_datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } @@ -813,6 +814,7 @@ mod test { drop(task); let sec_client = Arc::try_unwrap(sec_client).unwrap(); sec_client.shutdown().await; + db_datastore.terminate().await; db.cleanup().await.unwrap(); logctx.cleanup_successful(); } diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index daee9714d4..0691ecf863 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -218,6 +218,8 @@ pub struct Nexus { impl Nexus { /// Create a new Nexus instance for the given rack id `rack_id` + /// + /// If this function fails, the pool remains unterminated. 
// TODO-polish revisit rack metadata #[allow(clippy::too_many_arguments)] pub(crate) async fn new_with_id( @@ -225,12 +227,11 @@ impl Nexus { log: Logger, resolver: internal_dns_resolver::Resolver, qorb_resolver: internal_dns_resolver::QorbResolver, - pool: db::Pool, + pool: Arc<db::Pool>, producer_registry: &ProducerRegistry, config: &NexusConfig, authz: Arc<authz::Authz>, ) -> Result<Arc<Nexus>, String> { - let pool = Arc::new(pool); let all_versions = config .pkg .schema @@ -239,8 +240,13 @@ impl Nexus { .transpose() .map_err(|error| format!("{error:#}"))?; let db_datastore = Arc::new( - db::DataStore::new(&log, Arc::clone(&pool), all_versions.as_ref()) - .await?, + db::DataStore::new_with_timeout( + &log, + Arc::clone(&pool), + all_versions.as_ref(), + config.pkg.tunables.load_timeout, + ) + .await?, ); db_datastore.register_producers(producer_registry); @@ -647,30 +653,63 @@ impl Nexus { self.producer_server.lock().unwrap().replace(producer_server); } + /// Fully terminates Nexus. + /// + /// Closes all running servers and the connection to the datastore. + pub(crate) async fn terminate(&self) -> Result<(), String> { + let mut res = Ok(()); + res = res.and(self.close_servers().await); + self.datastore().terminate().await; + res + } + + /// Terminates all servers. + /// + /// This function also waits for the servers to shut down. pub(crate) async fn close_servers(&self) -> Result<(), String> { // NOTE: All these take the lock and swap out of the option immediately, // because they are synchronous mutexes, which cannot be held across the // await point these `close()` methods expose. let external_server = self.external_server.lock().unwrap().take(); + let mut res = Ok(()); + + let extend_err = + |mut res: &mut Result<(), String>, mut new: Result<(), String>| { + match (&mut res, &mut new) { + (Err(s), Err(new_err)) => { + s.push_str(&format!(", {new_err}")) + } + (Ok(()), Err(_)) => *res = new, + (_, Ok(())) => (), + } + }; + if let Some(server) = external_server { - server.close().await?; + extend_err(&mut res, server.close().await); } let techport_external_server = self.techport_external_server.lock().unwrap().take(); if let Some(server) = techport_external_server { - server.close().await?; + extend_err(&mut res, server.close().await); } let internal_server = self.internal_server.lock().unwrap().take(); if let Some(server) = internal_server { - server.close().await?; + extend_err(&mut res, server.close().await); } let producer_server = self.producer_server.lock().unwrap().take(); if let Some(server) = producer_server { - server.close().await.map_err(|e| e.to_string())?; + extend_err( + &mut res, + server.close().await.map_err(|e| e.to_string()), ); } - Ok(()) + res } + /// Awaits termination without triggering it. + /// + /// To trigger termination, see: + /// - [`Self::close_servers`] or [`Self::terminate`] pub(crate) async fn wait_for_shutdown(&self) -> Result<(), String> { // The internal server is the last server to be closed.
// diff --git a/nexus/src/bin/schema-updater.rs b/nexus/src/bin/schema-updater.rs index 4a43698f00..1b32fe26ee 100644 --- a/nexus/src/bin/schema-updater.rs +++ b/nexus/src/bin/schema-updater.rs @@ -70,15 +70,15 @@ async fn main() -> anyhow::Result<()> { let drain = LevelFilter::new(drain, args.log_level).fuse(); let log = Logger::root(drain, slog::o!("unit" => "schema_updater")); - let crdb_cfg = db::Config { url: args.url }; - let pool = Arc::new(db::Pool::new_single_host(&log, &crdb_cfg)); let schema_config = SchemaConfig { schema_dir: args.schema_directory }; let all_versions = AllSchemaVersions::load(&schema_config.schema_dir)?; + let crdb_cfg = db::Config { url: args.url }; + let pool = Arc::new(db::Pool::new_single_host(&log, &crdb_cfg)); + // We use the unchecked constructor of the datastore because we // don't want to block on someone else applying an upgrade. - let datastore = - DataStore::new_unchecked(log.clone(), pool).map_err(|e| anyhow!(e))?; + let datastore = DataStore::new_unchecked(log.clone(), pool); match args.cmd { Cmd::List => { @@ -112,5 +112,6 @@ async fn main() -> anyhow::Result<()> { println!("Upgrade to {version} complete"); } } + datastore.terminate().await; Ok(()) } diff --git a/nexus/src/context.rs b/nexus/src/context.rs index 1b2089f550..ea3a071605 100644 --- a/nexus/src/context.rs +++ b/nexus/src/context.rs @@ -255,6 +255,11 @@ impl ServerContext { } }; + // Once this database pool is created, it spawns workers which will + // be continually attempting to access database backends. + // + // It must be explicitly terminated, so be cautious about returning + // results beyond this point. let pool = match &config.deployment.database { nexus_config::Database::FromUrl { url } => { info!( @@ -275,17 +280,25 @@ impl ServerContext { } }; - let nexus = Nexus::new_with_id( + let pool = Arc::new(pool); + let nexus = match Nexus::new_with_id( rack_id, log.new(o!("component" => "nexus")), resolver, qorb_resolver, - pool, + pool.clone(), &producer_registry, config, Arc::clone(&authz), ) - .await?; + .await + { + Ok(nexus) => nexus, + Err(err) => { + pool.terminate().await; + return Err(err); + } + }; Ok(Arc::new(ServerContext { nexus, diff --git a/nexus/src/lib.rs b/nexus/src/lib.rs index eaabbd748b..634873ec84 100644 --- a/nexus/src/lib.rs +++ b/nexus/src/lib.rs @@ -83,13 +83,20 @@ impl InternalServer { .await?; // Launch the internal server. - let server_starter_internal = dropshot::HttpServerStarter::new( + let server_starter_internal = match dropshot::HttpServerStarter::new( &config.deployment.dropshot_internal, internal_api(), context.clone(), &log.new(o!("component" => "dropshot_internal")), ) - .map_err(|error| format!("initializing internal server: {}", error))?; + .map_err(|error| format!("initializing internal server: {}", error)) + { + Ok(server) => server, + Err(err) => { + context.context.nexus.datastore().terminate().await; + return Err(err); + } + }; let http_server_internal = server_starter_internal.start(); Ok(Self { @@ -204,12 +211,12 @@ impl Server { &self.apictx.context } - /// Wait for the given server to shut down - /// - /// Note that this doesn't initiate a graceful shutdown, so if you call this - /// immediately after calling `start()`, the program will block indefinitely - /// or until something else initiates a graceful shutdown. 
- pub(crate) async fn wait_for_finish(self) -> Result<(), String> { + // Wait for the given server to shut down + // + // Note that this doesn't initiate a graceful shutdown, so if you call this + // immediately after calling `start()`, the program will block indefinitely + // or until something else initiates a graceful shutdown. + async fn wait_for_finish(self) -> Result<(), String> { self.server_context().nexus.wait_for_shutdown().await } } @@ -221,12 +228,21 @@ impl nexus_test_interface::NexusServer for Server { async fn start_internal( config: &NexusConfig, log: &Logger, - ) -> (InternalServer, SocketAddr) { - let internal_server = - InternalServer::start(config, &log).await.unwrap(); + ) -> Result<(InternalServer, SocketAddr), String> { + let internal_server = InternalServer::start(config, &log).await?; internal_server.apictx.context.nexus.wait_for_populate().await.unwrap(); let addr = internal_server.http_server_internal.local_addr(); - (internal_server, addr) + Ok((internal_server, addr)) + } + + async fn stop_internal(internal_server: InternalServer) { + internal_server + .apictx + .context + .nexus + .terminate() + .await + .expect("Failed to terminate Nexus"); } async fn start( @@ -392,10 +408,9 @@ impl nexus_test_interface::NexusServer for Server { self.apictx .context .nexus - .close_servers() + .terminate() .await - .expect("failed to close servers during test cleanup"); - self.wait_for_finish().await.unwrap() + .expect("Failed to terminate Nexus"); } } diff --git a/nexus/src/populate.rs b/nexus/src/populate.rs index f026b1b504..08713bcc25 100644 --- a/nexus/src/populate.rs +++ b/nexus/src/populate.rs @@ -421,6 +421,7 @@ mod test { ) }) .unwrap(); + datastore.terminate().await; // Test again with the database offline. In principle we could do this // immediately without creating a new pool and datastore. @@ -460,6 +461,7 @@ mod test { ), }; info!(&log, "populator {:?} done", p); + datastore.terminate().await; logctx.cleanup_successful(); } } diff --git a/nexus/test-interface/src/lib.rs b/nexus/test-interface/src/lib.rs index c1e01cbc9e..d011ceccf2 100644 --- a/nexus/test-interface/src/lib.rs +++ b/nexus/test-interface/src/lib.rs @@ -50,7 +50,17 @@ pub trait NexusServer: Send + Sync + 'static { async fn start_internal( config: &NexusConfig, log: &Logger, - ) -> (Self::InternalServer, SocketAddr); + ) -> Result<(Self::InternalServer, SocketAddr), String>; + + /// Stops the execution of a `Self::InternalServer`. + /// + /// This is used to terminate a server which has been + /// partially created with `Self::start_internal`, but which + /// has not yet been passed to `Self::start`. + /// + /// Once `Self::start` has been called, the internal server + /// may be closed by invoking `Self::close`. + async fn stop_internal(internal_server: Self::InternalServer); #[allow(clippy::too_many_arguments)] async fn start( diff --git a/nexus/test-utils/src/lib.rs b/nexus/test-utils/src/lib.rs index 29c6d634a9..36866a544b 100644 --- a/nexus/test-utils/src/lib.rs +++ b/nexus/test-utils/src/lib.rs @@ -651,7 +651,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { } // Begin starting Nexus. 
- pub async fn start_nexus_internal(&mut self) { + pub async fn start_nexus_internal(&mut self) -> Result<(), String> { let log = &self.logctx.log; debug!(log, "Starting Nexus (internal API)"); @@ -673,7 +673,7 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { }; let (nexus_internal, nexus_internal_addr) = - N::start_internal(&self.config, &log).await; + N::start_internal(&self.config, &log).await?; let address = SocketAddrV6::new( match nexus_internal_addr.ip() { @@ -738,6 +738,8 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { self.nexus_internal = Some(nexus_internal); self.nexus_internal_addr = Some(nexus_internal_addr); + + Ok(()) } pub async fn populate_internal_dns(&mut self) { @@ -1177,6 +1179,9 @@ impl<'a, N: NexusServer> ControlPlaneTestContextBuilder<'a, N> { if let Some(server) = self.server { server.close().await; } + if let Some(nexus_internal) = self.nexus_internal { + N::stop_internal(nexus_internal).await; + } if let Some(mut database) = self.database { database.cleanup().await.unwrap(); } @@ -1359,7 +1364,12 @@ async fn setup_with_config_impl( ), ( "start_nexus_internal", - Box::new(|builder| builder.start_nexus_internal().boxed()), + Box::new(|builder| { + builder + .start_nexus_internal() + .map(|r| r.unwrap()) + .boxed() + }), ), ( "start_sled1", diff --git a/nexus/tests/integration_tests/initialization.rs b/nexus/tests/integration_tests/initialization.rs index a305a4178e..76ac75caa0 100644 --- a/nexus/tests/integration_tests/initialization.rs +++ b/nexus/tests/integration_tests/initialization.rs @@ -50,7 +50,9 @@ async fn test_nexus_boots_before_cockroach() { let nexus_log = log.clone(); let nexus_handle = tokio::task::spawn(async move { info!(nexus_log, "Test: Trying to start Nexus (internal)"); - omicron_nexus::Server::start_internal(&nexus_config, &nexus_log).await; + omicron_nexus::Server::start_internal(&nexus_config, &nexus_log) + .await + .unwrap(); info!(nexus_log, "Test: Started Nexus (internal)"); }); @@ -123,7 +125,9 @@ async fn test_nexus_boots_before_dendrite() { let nexus_log = log.clone(); let nexus_handle = tokio::task::spawn(async move { info!(nexus_log, "Test: Trying to start Nexus (internal)"); - omicron_nexus::Server::start_internal(&nexus_config, &nexus_log).await; + omicron_nexus::Server::start_internal(&nexus_config, &nexus_log) + .await + .unwrap(); info!(nexus_log, "Test: Started Nexus (internal)"); }); @@ -202,6 +206,9 @@ async fn test_nexus_does_not_boot_without_valid_schema() { for schema in schemas_to_test { let mut config = load_test_config(); + config.pkg.tunables.load_timeout = + Some(std::time::Duration::from_secs(5)); + let mut builder = ControlPlaneTestContextBuilder::<omicron_nexus::Server>::new( "test_nexus_does_not_boot_without_valid_schema", &config, ); @@ -224,14 +231,14 @@ async fn test_nexus_does_not_boot_without_valid_schema() { .await .expect("Failed to update schema"); - assert!( - timeout( - std::time::Duration::from_secs(5), - builder.start_nexus_internal(), - ) + let err = builder + .start_nexus_internal() .await - .is_err(), - "Nexus should have failed to start" + .expect_err("Nexus should have failed to start"); + + assert!( + err.contains("Failed to read valid DB schema"), + "Saw error: {err}" ); builder.teardown().await; diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index bda07be057..f852312de2 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -555,6 +555,8 @@ async fn dbinit_version_matches_version_known_to_nexus()
{ #[tokio::test] async fn nexus_cannot_apply_update_from_unknown_version() { let mut config = load_test_config(); + config.pkg.tunables.load_timeout = Some(std::time::Duration::from_secs(15)); + let mut builder = test_setup( &mut config, "nexus_cannot_apply_update_from_unknown_version", @@ -587,9 +589,7 @@ async fn nexus_cannot_apply_update_from_unknown_version() { .expect("Failed to update schema"); assert!( - timeout(Duration::from_secs(15), builder.start_nexus_internal()) - .await - .is_err(), + builder.start_nexus_internal().await.is_err(), "Nexus should not have started" ); @@ -1012,6 +1012,7 @@ async fn dbinit_equals_sum_of_all_up() { .expect("failed to insert - did we poison the OID cache?"); } std::mem::drop(conn_from_pool); + pool.terminate().await; std::mem::drop(pool); crdb.cleanup().await.unwrap(); diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 6a19d6591b..9f6c51e0c0 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -90,7 +90,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { version = "0.1.1", features = ["qtop"] } +qorb = { version = "0.1.2", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" } @@ -206,7 +206,7 @@ pkcs8 = { version = "0.10.2", default-features = false, features = ["encryption" postgres-types = { version = "0.2.8", default-features = false, features = ["with-chrono-0_4", "with-serde_json-1", "with-uuid-1"] } predicates = { version = "3.1.2" } proc-macro2 = { version = "1.0.87" } -qorb = { version = "0.1.1", features = ["qtop"] } +qorb = { version = "0.1.2", features = ["qtop"] } quote = { version = "1.0.37" } rand = { version = "0.8.5", features = ["small_rng"] } regex = { version = "1.11.0" }
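As a closing note for reviewers, the shutdown contract threaded through these hunks can be condensed into one self-contained sketch. Only the body of the `Drop` guard mirrors the patch (see the first hunk of this section); the surrounding type, its `terminated` flag, and `main` are invented for illustration:

```rust
/// Illustrative stand-in for a qorb-backed pool; the real one lives in
/// nexus/db-queries/src/db/pool.rs and its teardown details are elided here.
struct Pool {
    terminated: bool,
}

impl Pool {
    /// Explicit, awaited teardown. Callers run this before the tokio runtime
    /// starts shutting down -- and, in tests, before tearing down CockroachDB
    /// (note the `pool.terminate().await` ordering in the schema.rs hunk).
    async fn terminate(mut self) {
        // Real teardown of background workers is elided in this sketch.
        self.terminated = true;
    }
}

impl Drop for Pool {
    fn drop(&mut self) {
        if self.terminated {
            return;
        }
        // A dropped-but-unterminated pool is a bug; complain loudly, but
        // don't panic while already panicking, which would abort the process
        // and mask the original failure.
        let msg = "Pool dropped without invoking `terminate`";
        if std::thread::panicking() {
            eprintln!("{msg}");
        } else {
            panic!("{msg}");
        }
    }
}

#[tokio::main]
async fn main() {
    let pool = Pool { terminated: false };
    pool.terminate().await; // dropping `pool` without this line would panic
}
```

The same "aggregate, don't early-return" posture appears in `Nexus::close_servers` above, where each server's `close()` result is folded into a single `Result<(), String>` so that one failing server cannot skip termination of the others.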