Commit 8e5379b

WIP: Blueprint planner test
1 parent dfd9bba commit 8e5379b

1 file changed: +192 -1 lines changed


nexus/src/app/background/tasks/blueprint_planner.rs

Lines changed: 192 additions & 1 deletion
@@ -69,6 +69,7 @@ impl BlueprintPlanner {
             return status;
         };
         let (target, parent) = &*loaded;
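+        // Start from the parent blueprint and assume it is unchanged
+        // until planning below produces a new one.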
+        status.unchanged = true;
         status.blueprint_id = parent.id;
 
         // Get the inventory most recently seen by the collection
@@ -162,6 +163,7 @@ impl BlueprintPlanner {
             "parent_blueprint_id" => %parent.id,
             "blueprint_id" => %blueprint.id,
         );
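+        // Planning produced (and logged) a new blueprint, so the plan changed.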
+        status.unchanged = false;
         status.blueprint_id = blueprint.id;
 
         // Save it.
@@ -218,7 +220,6 @@ impl BlueprintPlanner {
                 "blueprint unchanged from current target";
                 "parent_blueprint_id" => %parent.id,
             );
-            status.unchanged = true;
         }
 
         status
@@ -233,3 +234,193 @@ impl BackgroundTask for BlueprintPlanner {
         Box::pin(async move { json!(self.plan(opctx).await) })
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::app::background::tasks::blueprint_load::TargetBlueprintLoader;
+    use crate::app::background::tasks::inventory_collection::InventoryCollector;
+    use nexus_db_model::{
+        ByteCount, PhysicalDisk, PhysicalDiskKind, SledBaseboard,
+        SledSystemHardware, SledUpdate, Zpool,
+    };
+    use nexus_test_utils_macros::nexus_test;
+    use nexus_types::deployment::{
+        BlueprintPhysicalDiskDisposition, BlueprintZoneDisposition, SledDisk,
+    };
+    use nexus_types::external_api::views::{
+        PhysicalDiskPolicy, PhysicalDiskState,
+    };
+    use omicron_common::disk::DiskIdentity;
+    use omicron_uuid_kinds::GenericUuid as _;
+    use omicron_uuid_kinds::PhysicalDiskUuid;
+    use omicron_uuid_kinds::SledUuid;
+    use std::net::SocketAddr;
+    use uuid::Uuid;
+
+    type ControlPlaneTestContext =
+        nexus_test_utils::ControlPlaneTestContext<crate::Server>;
+
+    #[nexus_test(server = crate::Server)]
+    async fn test_blueprint_planner(cptestctx: &ControlPlaneTestContext) {
+        // Set up the test context.
+        let nexus = &cptestctx.server.server_context().nexus;
+        let datastore = nexus.datastore();
+        let opctx = OpContext::for_tests(
+            cptestctx.logctx.log.clone(),
+            datastore.clone(),
+        );
+
+        // Spin up the background tasks: blueprint loader,
+        // inventory collector, and blueprint planner.
+        let mut loader = TargetBlueprintLoader::new(datastore.clone());
+        let mut rx_loader = loader.watcher();
+        loader.activate(&opctx).await;
+        let (_target, initial_blueprint) = &*rx_loader
+            .borrow_and_update()
+            .clone()
+            .expect("no initial blueprint");
+        eprintln!("{}", initial_blueprint.display());
+
+        let resolver = internal_dns_resolver::Resolver::new_from_addrs(
+            cptestctx.logctx.log.clone(),
+            &[cptestctx.internal_dns.dns_server.local_address()],
+        )
+        .unwrap();
+        let mut collector = InventoryCollector::new(
+            datastore.clone(),
+            resolver.clone(),
+            "test_planner",
+            1,
+            false,
+        );
+        let rx_collector = collector.watcher();
+        collector.activate(&opctx).await;
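+
+        // The planner consumes the collector's inventory and the
+        // loader's current target blueprint via watch channels.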
+        let mut planner = BlueprintPlanner::new(
+            datastore.clone(),
+            false,
+            rx_collector,
+            rx_loader,
+        );
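+        // Subscribe to the planner's results (not yet consumed in this WIP test).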
+        let mut rx_planner = planner.watcher();
+
+        // Without further setup, the planner should run but fail due
+        // to insufficient resources.
+        let status = serde_json::from_value::<BlueprintPlannerStatus>(
+            planner.activate(&opctx).await,
+        )
+        .unwrap();
+        assert!(!status.disabled);
+        assert!(status.unchanged);
+        assert_eq!(status.blueprint_id, initial_blueprint.id);
+        assert!({
+            let error = status.error.as_deref().unwrap();
+            error.starts_with("can't plan: ")
+                && error.ends_with(
+                    "no available zpools for additional InternalNtp zones",
+                )
+        });
+
+        // Set up some mock sleds.
+        let mut sled1 = httptest::Server::run();
+        let mut sled2 = httptest::Server::run();
+        let mock_server_ack_requests = |s: &mut httptest::Server| {
+            s.expect(
+                httptest::Expectation::matching(httptest::matchers::any())
+                    .times(..)
+                    .respond_with(httptest::responders::status_code(200)),
+            );
+        };
+        mock_server_ack_requests(&mut sled1);
+        mock_server_ack_requests(&mut sled2);
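+
+        // Insert sled records that point at the mock servers.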
+        let sled_id1 = SledUuid::new_v4();
+        let sled_id2 = SledUuid::new_v4();
+        let rack_id = Uuid::new_v4();
+        for (i, (sled_id, server)) in
+            [(sled_id1, &sled1), (sled_id2, &sled2)].iter().enumerate()
+        {
+            let SocketAddr::V6(addr) = server.addr() else {
+                panic!("expected IPv6 address, got {}", server.addr());
+            };
+            let bogus_repo_depot_port = 0;
+            let update = SledUpdate::new(
+                sled_id.into_untyped_uuid(),
+                addr,
+                bogus_repo_depot_port,
+                SledBaseboard {
+                    serial_number: i.to_string(),
+                    part_number: "test_planner".into(),
+                    revision: 1,
+                },
+                SledSystemHardware {
+                    is_scrimlet: false,
+                    usable_hardware_threads: 4,
+                    usable_physical_ram: ByteCount(1000.into()),
+                    reservoir_size: ByteCount(999.into()),
+                },
+                rack_id,
+                nexus_db_model::Generation::new(),
+            );
+            datastore.sled_upsert(update).await.expect("failed to upsert sled");
+        }
+
+        // Add some disks & zpools for zone planning.
+        for sled_id in initial_blueprint.sleds() {
+            for i in 0..=1 {
+                let disk = PhysicalDisk::new(
+                    PhysicalDiskUuid::new_v4(),
+                    String::from("fake-vendor"),
+                    format!("serial-{i}"),
+                    String::from("fake-model"),
+                    PhysicalDiskKind::U2,
+                    sled_id.into_untyped_uuid(),
+                );
+                let zpool = Zpool::new(
+                    Uuid::new_v4(),
+                    sled_id.into_untyped_uuid(),
+                    disk.id(),
+                    ByteCount(0.into()),
+                );
+                datastore
+                    .physical_disk_and_zpool_insert(&opctx, disk, zpool)
+                    .await
+                    .unwrap();
+            }
+        }
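+        // Re-collect inventory so the planner sees the new sleds and disks.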
+        collector.activate(&opctx).await;
+
+        // Planning should eventually succeed.
+        let mut blueprint_id = initial_blueprint.id;
+        for i in 0..10 {
+            let status = serde_json::from_value::<BlueprintPlannerStatus>(
+                planner.activate(&opctx).await,
+            )
+            .unwrap();
+            if let Some(error) = status.error {
+                eprintln!("planning iteration {i} failed: {error}");
+            } else {
+                assert!(!status.unchanged);
+                blueprint_id = status.blueprint_id;
+                eprintln!("planning succeeded: new blueprint {blueprint_id}");
+                break;
+            }
+        }
+
+        // Planning again should not change the plan.
+        let status = serde_json::from_value::<BlueprintPlannerStatus>(
+            planner.activate(&opctx).await,
+        )
+        .unwrap();
+        assert_eq!(
+            status,
+            BlueprintPlannerStatus {
+                disabled: false,
+                error: None,
+                unchanged: true,
+                blueprint_id,
+            }
+        );
+    }
+}
