diff --git a/live-tests/tests/common/mod.rs b/live-tests/tests/common/mod.rs index 32c0bfb982..3e3e583870 100644 --- a/live-tests/tests/common/mod.rs +++ b/live-tests/tests/common/mod.rs @@ -46,9 +46,8 @@ impl LiveTestContext { /// Clean up this `LiveTestContext` /// - /// This mainly removes log files created by the test. We do this in this - /// explicit cleanup function rather than on `Drop` because we want the log - /// files preserved on test failure. + /// This removes log files and cleans up the [`DataStore`], which + /// must be terminated asynchronously. pub async fn cleanup_successful(self) { self.datastore.terminate().await; self.logctx.cleanup_successful(); diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index ef6716a43b..a03b6a6249 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -87,7 +87,7 @@ impl DataStore { probe_id: Uuid, pool: Option, ) -> CreateResult { - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = IncompleteExternalIp::for_ephemeral_probe( ip_id, probe_id, @@ -123,7 +123,7 @@ impl DataStore { // Naturally, we now *need* to destroy the ephemeral IP if the newly alloc'd // IP was not attached, including on idempotent success. - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = IncompleteExternalIp::for_ephemeral(ip_id, authz_pool.id()); // We might not be able to acquire a new IP, but in the event of an @@ -205,7 +205,7 @@ impl DataStore { // If no pool specified, use the default logic None => { let (authz_pool, ..) 
= - self.ip_pools_fetch_default(&opctx).await?; + self.ip_pools_fetch_default(opctx).await?; authz_pool } }; @@ -224,7 +224,7 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); - let authz_pool = self.resolve_pool_for_allocation(&opctx, pool).await?; + let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; let data = if let Some(ip) = ip { IncompleteExternalIp::for_floating_explicit( @@ -695,7 +695,7 @@ impl DataStore { ip_id: Uuid, instance_id: InstanceUuid, ) -> Result, Error> { - let _ = LookupPath::new(&opctx, self) + let _ = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -951,7 +951,7 @@ impl DataStore { instance_id: InstanceUuid, creating_instance: bool, ) -> UpdateResult<(ExternalIp, bool)> { - let (.., authz_instance) = LookupPath::new(&opctx, self) + let (.., authz_instance) = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -993,7 +993,7 @@ impl DataStore { instance_id: InstanceUuid, creating_instance: bool, ) -> UpdateResult<(ExternalIp, bool)> { - let (.., authz_instance) = LookupPath::new(&opctx, self) + let (.., authz_instance) = LookupPath::new(opctx, self) .instance_id(instance_id.into_untyped_uuid()) .lookup_for(authz::Action::Modify) .await?; @@ -1167,7 +1167,7 @@ mod tests { let (opctx, datastore) = (db.opctx(), db.datastore()); // No IPs, to start - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, vec![]); // Set up service IP pool range @@ -1177,11 +1177,11 @@ mod tests { )) .unwrap(); let (service_ip_pool, _) = datastore - .ip_pools_service_lookup(&opctx) + .ip_pools_service_lookup(opctx) .await .expect("lookup service ip pool"); datastore - .ip_pool_add_range(&opctx, &service_ip_pool, &ip_range) + .ip_pool_add_range(opctx, &service_ip_pool, &ip_range) .await .expect("add range to 
service ip pool"); @@ -1207,7 +1207,7 @@ mod tests { }; let external_ip = datastore .external_ip_allocate_omicron_zone( - &opctx, + opctx, OmicronZoneUuid::new_v4(), ZoneKind::Nexus, external_ip, @@ -1220,7 +1220,7 @@ mod tests { external_ips.sort_by_key(|ip| ip.id); // Ensure we see them all. - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, external_ips); // Deallocate a few, and ensure we don't see them anymore. @@ -1229,7 +1229,7 @@ mod tests { if i % 3 == 0 { let id = external_ip.id; datastore - .deallocate_external_ip(&opctx, id) + .deallocate_external_ip(opctx, id) .await .expect("failed to deallocate IP"); removed_ip_ids.insert(id); @@ -1242,7 +1242,7 @@ mod tests { external_ips.retain(|ip| !removed_ip_ids.contains(&ip.id)); // Ensure we see them all remaining IPs. - let ips = read_all_service_ips(&datastore, &opctx).await; + let ips = read_all_service_ips(&datastore, opctx).await; assert_eq!(ips, external_ips); db.terminate().await; diff --git a/nexus/db-queries/src/db/datastore/pub_test_utils.rs b/nexus/db-queries/src/db/datastore/pub_test_utils.rs index 233113ea83..1572861f2e 100644 --- a/nexus/db-queries/src/db/datastore/pub_test_utils.rs +++ b/nexus/db-queries/src/db/datastore/pub_test_utils.rs @@ -38,7 +38,8 @@ mod test { impl TestDatabase { /// Creates a new database for test usage, with a pool. /// - /// [Self::terminate] should be called before the test finishes. + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. pub async fn new_with_pool(log: &Logger) -> Self { let db = test_setup_database(log).await; let cfg = db::Config { url: db.pg_config().clone() }; @@ -48,7 +49,8 @@ mod test { /// Creates a new database for test usage, with a pre-loaded datastore. /// - /// [Self::terminate] should be called before the test finishes. 
+ /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. pub async fn new_with_datastore(log: &Logger) -> Self { let db = test_setup_database(log).await; let (opctx, datastore) = @@ -60,7 +62,8 @@ mod test { /// Creates a new database for test usage, with a raw datastore. /// - /// [Self::terminate] should be called before the test finishes. + /// [`Self::terminate`] should be called before the test finishes, + /// or dropping the [`TestDatabase`] will panic. pub async fn new_with_raw_datastore(log: &Logger) -> Self { let db = test_setup_database(log).await; let cfg = db::Config { url: db.pg_config().clone() }; diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 2f17a5e2b6..d1028fbdb6 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -1059,7 +1059,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1077,7 +1077,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1115,7 +1115,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1135,7 +1135,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1159,7 +1159,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, /* pool_name = */ None, @@ -1214,7 +1214,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, 
context.default_pool_id().await, @@ -1233,7 +1233,7 @@ mod tests { context .db .datastore() - .deallocate_external_ip(&context.db.opctx(), ips[0].id) + .deallocate_external_ip(context.db.opctx(), ips[0].id) .await .expect("Failed to release the first external IP address"); @@ -1244,7 +1244,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1272,7 +1272,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, context.default_pool_id().await, @@ -1310,7 +1310,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, pool_name, @@ -1359,7 +1359,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1377,7 +1377,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_3, @@ -1393,7 +1393,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1413,7 +1413,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, OmicronZoneExternalIp::Floating(OmicronZoneExternalFloatingIp { @@ -1439,7 +1439,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::BoundaryNtp, ip_10_0_0_3_snat_0, @@ -1467,7 +1467,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), snat_service_id, 
ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1489,7 +1489,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_32768, @@ -1517,7 +1517,7 @@ mod tests { let err = context .db.datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), snat_service_id, ZoneKind::BoundaryNtp, ip_10_0_0_1_snat_49152, @@ -1557,7 +1557,7 @@ mod tests { .db .datastore() .external_ip_allocate_omicron_zone( - &context.db.opctx(), + context.db.opctx(), service_id, ZoneKind::Nexus, ip_10_0_0_5, @@ -1592,7 +1592,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1610,7 +1610,7 @@ mod tests { .db .datastore() .allocate_instance_snat_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, context.default_pool_id().await, @@ -1664,7 +1664,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), id, instance_id, Some(p1), @@ -1710,7 +1710,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1.clone()), @@ -1733,7 +1733,7 @@ mod tests { .db .datastore() .allocate_instance_ephemeral_ip( - &context.db.opctx(), + context.db.opctx(), Uuid::new_v4(), instance_id, Some(p1), diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index d6d0f22f94..0691ecf863 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -709,7 +709,7 @@ impl Nexus { /// Awaits termination without triggering it. /// /// To trigger termination, see: - /// - [Self::close_servers] or [Self::terminate] + /// - [`Self::close_servers`] or [`Self::terminate`] pub(crate) async fn wait_for_shutdown(&self) -> Result<(), String> { // The internal server is the last server to be closed. //