From 144e91ae570e503a7e3f470efbcbba268afa4ae0 Mon Sep 17 00:00:00 2001 From: iximeow Date: Tue, 1 Oct 2024 17:13:29 +0000 Subject: [PATCH] Add a boot device field to Instance, forward along as appropriate (#6585) the Omicron side of adding explicit boot order selection to instances (counterpart to [propolis#756](https://github.com/oxidecomputer/propolis/pull/756)). first, this extends `params::InstanceCreate` to take a new `boot_disk: Option<InstanceDiskAttachment>`. additionally, this adds a `PUT /v1/instances/{instance}` endpoint to update instances. the only property that can be updated at the moment is `boot_disk`: pick a new boot disk or unset it entirely. this also partially subsumes https://github.com/oxidecomputer/omicron/pull/6321. finally, this updates Omicron to reference a recent enough Propolis that #756 is included. a surprising discovery along the way: you can specify a disk to be attached multiple times in `disks` today, when creating an instance, and we're fine with it! this carries through with the new `boot_disk` field: if you specify the same disk as `boot_disk` and in `disks`, it is technically listed for attachment twice, but the attachment will succeed. --- Cargo.lock | 37 +- Cargo.toml | 6 +- common/src/api/external/mod.rs | 13 + dev-tools/omdb/src/bin/omdb/db.rs | 3 + end-to-end-tests/src/instance_launch.rs | 5 +- nexus/db-model/src/instance.rs | 15 + nexus/db-model/src/schema.rs | 1 + nexus/db-model/src/schema_versions.rs | 3 +- nexus/db-queries/src/db/datastore/disk.rs | 12 +- nexus/db-queries/src/db/datastore/instance.rs | 155 ++++- .../db-queries/src/db/datastore/migration.rs | 1 + .../virtual_provisioning_collection.rs | 1 + nexus/db-queries/src/db/datastore/vpc.rs | 1 + .../db-queries/src/db/queries/external_ip.rs | 1 + .../src/db/queries/network_interface.rs | 1 + nexus/external-api/output/nexus_tags.txt | 1 + nexus/external-api/src/lib.rs | 13 + .../tasks/instance_reincarnation.rs | 1 + nexus/src/app/instance.rs | 97 ++- nexus/src/app/sagas/instance_create.rs | 108 ++- nexus/src/app/sagas/instance_delete.rs | 5 +- nexus/src/app/sagas/instance_migrate.rs | 1 + nexus/src/app/sagas/instance_start.rs | 1 + nexus/src/app/sagas/instance_update/mod.rs | 1 + nexus/src/app/sagas/snapshot_create.rs | 8 +- nexus/src/external_api/http_entrypoints.rs | 36 + nexus/test-utils/src/resource_helpers.rs | 2 + nexus/tests/integration_tests/endpoints.rs | 6 + nexus/tests/integration_tests/external_ips.rs | 1 + nexus/tests/integration_tests/instances.rs | 618 +++++++++++++++++- nexus/tests/integration_tests/projects.rs | 1 + nexus/tests/integration_tests/quotas.rs | 1 + nexus/tests/integration_tests/schema.rs | 109 +++ nexus/tests/integration_tests/snapshots.rs | 10 +- .../integration_tests/subnet_allocation.rs | 1 + nexus/types/src/external_api/params.rs | 35 + openapi/nexus-internal.json | 6 + openapi/nexus.json | 85 +++ openapi/sled-agent.json | 36 + package-manifest.toml | 4 +- schema/crdb/add-instance-boot-disk/up01.sql | 1 + schema/crdb/add-instance-boot-disk/up02.sql | 50 ++ schema/crdb/dbinit.sql | 11 +- sled-agent/src/instance.rs | 4 + sled-agent/src/sim/sled_agent.rs | 1 + sled-agent/types/src/instance.rs | 1 + 46 files changed, 1463 insertions(+), 47 deletions(-) create mode 100644 schema/crdb/add-instance-boot-disk/up01.sql create mode 100644 schema/crdb/add-instance-boot-disk/up02.sql diff --git a/Cargo.lock b/Cargo.lock index 4aab6fc802..6361080246 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -663,7 +663,7 @@ dependencies = [ [[package]] name = "bhyve_api" version = "0.0.0" -source =
"git+https://github.com/oxidecomputer/propolis?rev=fae5334bcad5e864794332c6fed5e6bb9ec88831#fae5334bcad5e864794332c6fed5e6bb9ec88831" +source = "git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927" dependencies = [ "bhyve_api_sys", "libc", @@ -673,7 +673,7 @@ dependencies = [ [[package]] name = "bhyve_api_sys" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fae5334bcad5e864794332c6fed5e6bb9ec88831#fae5334bcad5e864794332c6fed5e6bb9ec88831" +source = "git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927" dependencies = [ "libc", "strum", @@ -4736,7 +4736,7 @@ dependencies = [ "indicatif", "libc", "libnet 0.1.0 (git+https://github.com/oxidecomputer/netadm-sys?branch=main)", - "propolis-client", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=fae5334bcad5e864794332c6fed5e6bb9ec88831)", "propolis-server-config", "rand", "regex", @@ -6621,7 +6621,7 @@ dependencies = [ "pq-sys", "pretty_assertions", "progenitor-client", - "propolis-client", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927)", "rand", "rcgen", "ref-cast", @@ -6879,7 +6879,7 @@ dependencies = [ "oximeter-producer", "oxnet", "pretty_assertions", - "propolis-client", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927)", "propolis-mock-server", "rand", "rcgen", @@ -8508,6 +8508,27 @@ dependencies = [ "syn 2.0.79", ] +[[package]] +name = "propolis-client" +version = "0.1.0" +source = "git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927#11371b0f3743f8df5b047dc0edc2699f4bdf3927" +dependencies = [ + "async-trait", + "base64 0.21.7", + "futures", + "progenitor", + "rand", + "reqwest 0.12.7", + "schemars", + "serde", + "serde_json", + "slog", + "thiserror", + "tokio", + "tokio-tungstenite 0.21.0", + "uuid", +] + [[package]] name = "propolis-client" version = "0.1.0" @@ -8532,7 +8553,7 @@ dependencies = [ [[package]] name = "propolis-mock-server" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fae5334bcad5e864794332c6fed5e6bb9ec88831#fae5334bcad5e864794332c6fed5e6bb9ec88831" +source = "git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927#11371b0f3743f8df5b047dc0edc2699f4bdf3927" dependencies = [ "anyhow", "atty", @@ -8574,7 +8595,7 @@ dependencies = [ [[package]] name = "propolis_types" version = "0.0.0" -source = "git+https://github.com/oxidecomputer/propolis?rev=fae5334bcad5e864794332c6fed5e6bb9ec88831#fae5334bcad5e864794332c6fed5e6bb9ec88831" +source = "git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927#11371b0f3743f8df5b047dc0edc2699f4bdf3927" dependencies = [ "schemars", "serde", @@ -10223,7 +10244,7 @@ dependencies = [ "omicron-uuid-kinds", "omicron-workspace-hack", "oxnet", - "propolis-client", + "propolis-client 0.1.0 (git+https://github.com/oxidecomputer/propolis?rev=11371b0f3743f8df5b047dc0edc2699f4bdf3927)", "rcgen", "schemars", "serde", diff --git a/Cargo.toml b/Cargo.toml index 07765e098e..98b0246d5f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -511,9 +511,9 @@ prettyplease = { version = "0.2.22", features = ["verbatim"] } proc-macro2 = "1.0" progenitor = "0.8.0" progenitor-client = "0.8.0" -bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "fae5334bcad5e864794332c6fed5e6bb9ec88831" } 
-propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "fae5334bcad5e864794332c6fed5e6bb9ec88831" } -propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "fae5334bcad5e864794332c6fed5e6bb9ec88831" } +bhyve_api = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } +propolis-client = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } +propolis-mock-server = { git = "https://github.com/oxidecomputer/propolis", rev = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" } proptest = "1.5.0" qorb = { git = "https://github.com/oxidecomputer/qorb", branch = "master" } quote = "1.0" diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 05129c689b..a34f5b71ac 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -348,6 +348,16 @@ impl TryFrom<String> for NameOrId { } } +impl FromStr for NameOrId { + // TODO: We should have better error types here. + // See https://github.com/oxidecomputer/omicron/issues/347 + type Err = String; + + fn from_str(value: &str) -> Result<Self, Self::Err> { + NameOrId::try_from(String::from(value)) + } +} + impl From<Name> for NameOrId { fn from(name: Name) -> Self { NameOrId::Name(name) } } @@ -1183,6 +1193,9 @@ pub struct Instance { /// RFC1035-compliant hostname for the Instance. pub hostname: String, + /// the ID of the disk used to boot this Instance, if a specific one is assigned. + pub boot_disk_id: Option<Uuid>, + #[serde(flatten)] pub runtime: InstanceRuntimeState, diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index 3bbbd46b84..be068f0912 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -2941,6 +2941,7 @@ async fn cmd_db_instance_info( const VCPUS: &'static str = "vCPUs"; const MEMORY: &'static str = "memory"; const HOSTNAME: &'static str = "hostname"; + const BOOT_DISK: &'static str = "boot disk"; const AUTO_RESTART: &'static str = "auto-restart"; const STATE: &'static str = "nexus state"; const LAST_MODIFIED: &'static str = "last modified at"; @@ -2963,6 +2964,7 @@ async fn cmd_db_instance_info( DELETED, VCPUS, MEMORY, + BOOT_DISK, HOSTNAME, AUTO_RESTART, STATE, @@ -3006,6 +3008,7 @@ async fn cmd_db_instance_info( println!(" {VCPUS:>WIDTH$}: {}", instance.ncpus.0 .0); println!(" {MEMORY:>WIDTH$}: {}", instance.memory.0); println!(" {HOSTNAME:>WIDTH$}: {}", instance.hostname); + println!(" {BOOT_DISK:>WIDTH$}: {:?}", instance.boot_disk_id); print_multiline_debug(AUTO_RESTART, &instance.auto_restart); println!("\n{:=<80}", "== RUNTIME STATE "); let InstanceRuntimeState { diff --git a/end-to-end-tests/src/instance_launch.rs b/end-to-end-tests/src/instance_launch.rs index a5cd02202e..fc3a29bffa 100644 --- a/end-to-end-tests/src/instance_launch.rs +++ b/end-to-end-tests/src/instance_launch.rs @@ -66,9 +66,10 @@ async fn instance_launch() -> Result<()> { hostname: "localshark".parse().unwrap(), // 🦈 memory: ByteCount(1024 * 1024 * 1024), ncpus: InstanceCpuCount(2), - disks: vec![InstanceDiskAttachment::Attach { + boot_disk: Some(InstanceDiskAttachment::Attach { name: disk_name.clone(), - }], + }), + disks: Vec::new(), network_interfaces: InstanceNetworkInterfaceAttachment::Default, external_ips: vec![ExternalIpCreate::Ephemeral { pool: None }], user_data: String::new(), diff --git a/nexus/db-model/src/instance.rs b/nexus/db-model/src/instance.rs index f57a70aa80..3bc9d3f993 100644 --- a/nexus/db-model/src/instance.rs +++
b/nexus/db-model/src/instance.rs @@ -62,6 +62,10 @@ pub struct Instance { #[diesel(embed)] pub auto_restart: InstanceAutoRestart, + /// The primary boot disk for this instance. + #[diesel(column_name = boot_disk_id)] + pub boot_disk_id: Option<Uuid>, + #[diesel(embed)] pub runtime_state: InstanceRuntimeState, @@ -115,6 +119,9 @@ impl Instance { memory: params.memory.into(), hostname: params.hostname.to_string(), auto_restart, + // Intentionally ignore `params.boot_disk_id` here: we can't set + // `boot_disk_id` until the referenced disk is attached. + boot_disk_id: None, runtime_state, @@ -520,3 +527,11 @@ mod optional_time_delta { .serialize(serializer) } } + +/// The parts of an Instance that can be directly updated after creation. +#[derive(Clone, Debug, AsChangeset, Serialize, Deserialize)] +#[diesel(table_name = instance, treat_none_as_null = true)] +pub struct InstanceUpdate { + #[diesel(column_name = boot_disk_id)] + pub boot_disk_id: Option<Uuid>, +} diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index 9e0ec1055f..d9e2c43e75 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -409,6 +409,7 @@ table! { hostname -> Text, auto_restart_policy -> Nullable<crate::InstanceAutoRestartPolicyEnum>, auto_restart_cooldown -> Nullable<Interval>, + boot_disk_id -> Nullable<Uuid>, time_state_updated -> Timestamptz, state_generation -> Int8, active_propolis_id -> Nullable<Uuid>, diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 6f16241906..12e03e6d4e 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -17,7 +17,7 @@ use std::collections::BTreeMap; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(106, 0, 0); +pub const SCHEMA_VERSION: SemverVersion = SemverVersion::new(107, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -29,6 +29,7 @@ static KNOWN_VERSIONS: Lazy<Vec<KnownVersion>> = Lazy::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(107, "add-instance-boot-disk"), KnownVersion::new(106, "dataset-kinds-update"), KnownVersion::new(105, "inventory-nvme-firmware"), KnownVersion::new(104, "lookup-bgp-config-indexes"), diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 129b5fa59d..7adc6df3e1 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -335,7 +335,11 @@ impl DataStore { .into_boxed() .filter(instance::dsl::state .eq_any(ok_to_detach_instance_states) - .and(instance::dsl::active_propolis_id.is_null())), + .and(instance::dsl::active_propolis_id.is_null()) + .and( + instance::dsl::boot_disk_id.ne(authz_disk.id()) + .or(instance::dsl::boot_disk_id.is_null()) + )), disk::table .into_boxed() .filter(disk::dsl::disk_state.eq_any(ok_to_detach_disk_state_labels)), @@ -388,6 +392,12 @@ impl DataStore { // Ok-to-be-detached instance states: api::external::InstanceState::Creating | api::external::InstanceState::Stopped => { + if collection.boot_disk_id == Some(authz_disk.id()) { + return Err(Error::conflict( + "boot disk cannot be detached" + )); + } + // We can't detach, but the error hasn't // helped us infer why.
return Err(Error::internal_error( diff --git a/nexus/db-queries/src/db/datastore/instance.rs b/nexus/db-queries/src/db/datastore/instance.rs index 6ab4f5f6b5..e89cd8f234 100644 --- a/nexus/db-queries/src/db/datastore/instance.rs +++ b/nexus/db-queries/src/db/datastore/instance.rs @@ -24,6 +24,7 @@ use crate::db::model::InstanceAutoRestart; use crate::db::model::InstanceAutoRestartPolicy; use crate::db::model::InstanceRuntimeState; use crate::db::model::InstanceState; +use crate::db::model::InstanceUpdate; use crate::db::model::Migration; use crate::db::model::MigrationState; use crate::db::model::Name; @@ -37,6 +38,7 @@ use crate::db::pool::DbConnection; use crate::db::update_and_check::UpdateAndCheck; use crate::db::update_and_check::UpdateAndQueryResult; use crate::db::update_and_check::UpdateStatus; +use crate::transaction_retry::OptionalError; use async_bb8_diesel::AsyncRunQueryDsl; use chrono::Utc; use diesel::prelude::*; @@ -243,7 +245,7 @@ impl From<InstanceAndActiveVmm> for external::Instance { .hostname .parse() .expect("found invalid hostname in the database"), - + boot_disk_id: value.instance.boot_disk_id, runtime: external::InstanceRuntimeState { run_state: value.effective_state(), time_run_state_updated, @@ -1003,6 +1005,156 @@ impl DataStore { Ok(result) } + pub async fn instance_reconfigure( + &self, + opctx: &OpContext, + authz_instance: &authz::Instance, + update: InstanceUpdate, + ) -> Result<InstanceAndActiveVmm, Error> { + opctx.authorize(authz::Action::Modify, authz_instance).await?; + + use crate::db::model::InstanceState; + + use db::schema::disk::dsl as disk_dsl; + use db::schema::instance::dsl as instance_dsl; + use db::schema::vmm::dsl as vmm_dsl; + + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + let (instance, vmm) = self + .transaction_retry_wrapper("reconfigure_instance") + .transaction(&conn, |conn| { + let err = err.clone(); + let update = update.clone(); + async move { + // * Allow reconfiguration in NoVmm because there is no VMM + // to contend with. + // * Allow reconfiguration in Failed to allow changing the + // boot disk of a failed instance and free its boot disk + // for detach. + // * Allow reconfiguration in Creating because one of the + // last steps of instance creation, while the instance is + // still in Creating, is to reconfigure the instance to + // the desired boot disk. + let ok_to_reconfigure_instance_states = [ + InstanceState::NoVmm, + InstanceState::Failed, + InstanceState::Creating, + ]; + + let instance_state = instance_dsl::instance + .filter(instance_dsl::id.eq(authz_instance.id())) + .filter(instance_dsl::time_deleted.is_null()) + .select(instance_dsl::state) + .first_async::<InstanceState>(&conn) + .await; + + match instance_state { + Ok(state) => { + let state_ok = ok_to_reconfigure_instance_states + .contains(&state); + + if !state_ok { + return Err(err.bail(Error::conflict( + "instance must be stopped to update", + ))); + } + } + Err(diesel::NotFound) => { + // If the instance simply doesn't exist, we + // shouldn't retry. Bail with a useful error. + return Err(err.bail(Error::not_found_by_id( + ResourceType::Instance, + &authz_instance.id(), + ))); + } + Err(e) => { + return Err(e); + } + } + + if let Some(disk_id) = update.boot_disk_id { + // Ensure the disk is currently attached before updating + // the database.
+ let expected_state = api::external::DiskState::Attached( authz_instance.id(), ); + + let attached_disk: Option<Uuid> = disk_dsl::disk + .filter(disk_dsl::id.eq(disk_id)) + .filter( + disk_dsl::attach_instance_id + .eq(authz_instance.id()), + ) + .filter( + disk_dsl::disk_state.eq(expected_state.label()), + ) + .select(disk_dsl::id) + .first_async::<Uuid>(&conn) + .await + .optional()?; + + if attached_disk.is_none() { + return Err(err.bail(Error::conflict( + "boot disk must be attached", + ))); + } + } + + // if and when `Update` can update other fields, set them + // here. + // + // NOTE: from this point forward it is OK if we update the + // instance's `boot_disk_id` column with the updated value + // again. It will have already been assigned with constraint + // checking performed above, so updates will just be + // repetitive, not harmful. + + // Update the row. We don't care about the returned + // UpdateStatus, either way the database has been updated + // with the state we're setting. + diesel::update(instance_dsl::instance) + .filter(instance_dsl::id.eq(authz_instance.id())) + .set(update) + .execute_async(&conn) + .await?; + + // TODO: dedupe this query and `instance_fetch_with_vmm`. + // At the moment, we're only allowing instance + // reconfiguration in states that would have no VMM, but + // load it anyway so that we return correct data if this is + // relaxed in the future... + let (instance, vmm) = instance_dsl::instance + .filter(instance_dsl::id.eq(authz_instance.id())) + .filter(instance_dsl::time_deleted.is_null()) + .left_join( + vmm_dsl::vmm.on(vmm_dsl::id + .nullable() + .eq(instance_dsl::active_propolis_id) + .and(vmm_dsl::time_deleted.is_null())), + ) + .select(( + Instance::as_select(), + Option::<Vmm>::as_select(), + )) + .get_result_async(&conn) + .await?; + + Ok((instance, vmm)) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + return err; + } + + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + Ok(InstanceAndActiveVmm { instance, vmm }) + } + pub async fn project_delete_instance( &self, opctx: &OpContext, @@ -1799,6 +1951,7 @@ mod tests { params::InstanceNetworkInterfaceAttachment::None, external_ips: Vec::new(), disks: Vec::new(), + boot_disk: None, ssh_public_keys: None, start: false, auto_restart_policy: Default::default(), diff --git a/nexus/db-queries/src/db/datastore/migration.rs b/nexus/db-queries/src/db/datastore/migration.rs index 819a11a953..584f00f084 100644 --- a/nexus/db-queries/src/db/datastore/migration.rs +++ b/nexus/db-queries/src/db/datastore/migration.rs @@ -234,6 +234,7 @@ mod tests { params::InstanceNetworkInterfaceAttachment::None, external_ips: Vec::new(), disks: Vec::new(), + boot_disk: None, ssh_public_keys: None, start: false, auto_restart_policy: Default::default(), diff --git a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs index d5bf84b0b6..0e200f47bb 100644 --- a/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs +++ b/nexus/db-queries/src/db/datastore/virtual_provisioning_collection.rs @@ -454,6 +454,7 @@ mod test { params::InstanceNetworkInterfaceAttachment::None, external_ips: Vec::new(), disks: Vec::new(), + boot_disk: None, ssh_public_keys: None, start: false, auto_restart_policy: Default::default(), diff --git a/nexus/db-queries/src/db/datastore/vpc.rs b/nexus/db-queries/src/db/datastore/vpc.rs index 0993660de7..27435fa3aa 100644 --- a/nexus/db-queries/src/db/datastore/vpc.rs +++
b/nexus/db-queries/src/db/datastore/vpc.rs @@ -2842,6 +2842,7 @@ mod tests { params::InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, ssh_public_keys: None, start: false, auto_restart_policy: Default::default(), diff --git a/nexus/db-queries/src/db/queries/external_ip.rs b/nexus/db-queries/src/db/queries/external_ip.rs index 804b6d625c..2caab9ee46 100644 --- a/nexus/db-queries/src/db/queries/external_ip.rs +++ b/nexus/db-queries/src/db/queries/external_ip.rs @@ -1015,6 +1015,7 @@ mod tests { network_interfaces: Default::default(), external_ips: vec![], disks: vec![], + boot_disk: None, start: false, auto_restart_policy: Default::default(), }); diff --git a/nexus/db-queries/src/db/queries/network_interface.rs b/nexus/db-queries/src/db/queries/network_interface.rs index 494ecd4982..d664f7459a 100644 --- a/nexus/db-queries/src/db/queries/network_interface.rs +++ b/nexus/db-queries/src/db/queries/network_interface.rs @@ -1859,6 +1859,7 @@ mod tests { network_interfaces: InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; diff --git a/nexus/external-api/output/nexus_tags.txt b/nexus/external-api/output/nexus_tags.txt index cb9d283bad..cd6385f589 100644 --- a/nexus/external-api/output/nexus_tags.txt +++ b/nexus/external-api/output/nexus_tags.txt @@ -62,6 +62,7 @@ instance_serial_console_stream GET /v1/instances/{instance}/seria instance_ssh_public_key_list GET /v1/instances/{instance}/ssh-public-keys instance_start POST /v1/instances/{instance}/start instance_stop POST /v1/instances/{instance}/stop +instance_update PUT /v1/instances/{instance} instance_view GET /v1/instances/{instance} API operations found with tag "login" diff --git a/nexus/external-api/src/lib.rs b/nexus/external-api/src/lib.rs index 91676bfc3b..d41a23346e 100644 --- a/nexus/external-api/src/lib.rs +++ b/nexus/external-api/src/lib.rs @@ -1117,6 +1117,19 @@ pub trait NexusExternalApi { path_params: Path<params::InstancePath>, ) -> Result<HttpResponseOk<Instance>, HttpError>; + /// Update instance + #[endpoint { + method = PUT, + path = "/v1/instances/{instance}", + tags = ["instances"], + }] + async fn instance_update( + rqctx: RequestContext<Self::Context>, + query_params: Query<params::OptionalProjectSelector>, + path_params: Path<params::InstancePath>, + instance_config: TypedBody<params::InstanceUpdate>, + ) -> Result<HttpResponseOk<Instance>, HttpError>; + /// Reboot an instance #[endpoint { method = POST, diff --git a/nexus/src/app/background/tasks/instance_reincarnation.rs b/nexus/src/app/background/tasks/instance_reincarnation.rs index 4afd2f6a7e..25704df6a5 100644 --- a/nexus/src/app/background/tasks/instance_reincarnation.rs +++ b/nexus/src/app/background/tasks/instance_reincarnation.rs @@ -382,6 +382,7 @@ mod test { params::InstanceNetworkInterfaceAttachment::None, external_ips: Vec::new(), disks: Vec::new(), + boot_disk: None, ssh_public_keys: None, start: state == InstanceState::Vmm, auto_restart_policy: Some(auto_restart), diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 83ccf6bbb8..ca8c441a41 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -19,6 +19,7 @@ use crate::external_api::params; use cancel_safe_futures::prelude::*; use futures::future::Fuse; use futures::{FutureExt, SinkExt, StreamExt}; +use nexus_db_model::InstanceUpdate; use nexus_db_model::IpAttachState; use nexus_db_model::IpKind; use nexus_db_model::Vmm as DbVmm; @@ -61,6 +62,8 @@ use propolis_client::support::WebSocketStream; use sagas::instance_common::ExternalIpAttach; use sagas::instance_start; use sagas::instance_update;
+use sled_agent_client::types::BootOrderEntry; +use sled_agent_client::types::BootSettings; use sled_agent_client::types::InstanceMigrationTargetParams; use sled_agent_client::types::InstanceProperties; use sled_agent_client::types::VmmPutStateBody; @@ -299,6 +302,40 @@ impl super::Nexus { } } + pub(crate) async fn instance_reconfigure( + self: &Arc<Self>, + opctx: &OpContext, + instance_lookup: &lookup::Instance<'_>, + params: &params::InstanceUpdate, + ) -> UpdateResult<InstanceAndActiveVmm> { + let (.., authz_project, authz_instance) = + instance_lookup.lookup_for(authz::Action::Modify).await?; + + let boot_disk_id = match params.boot_disk.clone() { + Some(disk) => { + let selector = params::DiskSelector { + project: match &disk { + NameOrId::Name(_) => Some(authz_project.id().into()), + NameOrId::Id(_) => None, + }, + disk, + }; + let (.., authz_disk) = self + .disk_lookup(opctx, selector)? + .lookup_for(authz::Action::Modify) + .await?; + + Some(authz_disk.id()) + } + None => None, + }; + + let update = InstanceUpdate { boot_disk_id }; + self.datastore() + .instance_reconfigure(opctx, &authz_instance, update) + .await + } + pub(crate) async fn project_create_instance( self: &Arc<Self>, opctx: &OpContext, @@ -308,14 +345,17 @@ impl super::Nexus { let (.., authz_project) = project_lookup.lookup_for(authz::Action::CreateChild).await?; + let all_disks: Vec<&params::InstanceDiskAttachment> = + params.boot_disk.iter().chain(params.disks.iter()).collect(); + // Validate parameters - if params.disks.len() > MAX_DISKS_PER_INSTANCE as usize { + if all_disks.len() > MAX_DISKS_PER_INSTANCE as usize { return Err(Error::invalid_request(&format!( "cannot attach more than {} disks to instance", MAX_DISKS_PER_INSTANCE ))); } - for disk in &params.disks { + for disk in all_disks.iter() { if let params::InstanceDiskAttachment::Create(create) = disk { self.validate_disk_create_params(opctx, &authz_project, create) .await?; @@ -436,6 +476,14 @@ impl super::Nexus { } } + // It is deceptively inconvenient to do an early check that the boot + // disk is valid here! We accept boot disk by name or ID, but disk + // creation and attachment requests as part of instance creation all + // require the disk name. So if the boot disk is an ID, we would need + // to look up all attachment requests to compare the named disk and + // to-be-attached disks. Instead, leave this for the other end of the + // saga when we'd go to set the boot disk. + let saga_params = sagas::instance_create::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), project_id: authz_project.id(), @@ -1032,6 +1080,8 @@ impl super::Nexus { ) .await?; + let mut boot_disk_name = None; + let mut disk_reqs = vec![]; for disk in &disks { // Disks that are attached to an instance should always have a slot @@ -1066,6 +1116,12 @@ impl super::Nexus { ) .await?; + // Propolis wants the name of the boot disk rather than its ID, because + // we send names rather than IDs in the disk requests assembled below.
+ if db_instance.boot_disk_id == Some(disk.id()) { + boot_disk_name = Some(disk.name().to_string()); + } + disk_reqs.push(sled_agent_client::types::DiskRequest { name: disk.name().to_string(), slot: sled_agent_client::types::Slot(slot.0), @@ -1078,6 +1134,41 @@ impl super::Nexus { }); } + let boot_settings = if let Some(boot_disk_name) = boot_disk_name { + Some(BootSettings { + order: vec![BootOrderEntry { name: boot_disk_name }], + }) + } else { + if let Some(instance_boot_disk_id) = + db_instance.boot_disk_id.as_ref() + { + // This should never occur: when setting the boot disk we ensure it is + // attached, and when detaching a disk we ensure it is not the boot + // disk. If this error is seen, the instance somehow had a boot disk + // that was not attached anyway. + // + // When Propolis accepts an ID rather than name, and we don't need to + // look up a name when assembling the Propolis request, we might as well + // remove this check; we can just pass the ID and rely on Propolis' own + // check that the boot disk is attached. + if boot_disk_name.is_none() { + error!(self.log, "instance boot disk is not attached"; + "boot_disk_id" => ?instance_boot_disk_id, + "instance id" => %db_instance.id()); + + return Err(InstanceStateChangeError::Other( + Error::internal_error(&format!( + "instance {} has boot disk {:?} but it is not attached", + db_instance.id(), + db_instance.boot_disk_id.as_ref(), + )), + )); + } + } + + None + }; + let nics = self .db_datastore .derive_guest_network_interface_info(&opctx, &authz_instance) @@ -1225,6 +1316,7 @@ impl super::Nexus { search_domains: Vec::new(), }, disks: disk_reqs, + boot_settings, cloud_init_bytes: Some(base64::Engine::encode( &base64::engine::general_purpose::STANDARD, db_instance.generate_cidata(&ssh_keys)?, @@ -2310,6 +2402,7 @@ mod tests { network_interfaces: InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, ssh_public_keys: None, start: false, auto_restart_policy: Default::default(), diff --git a/nexus/src/app/sagas/instance_create.rs b/nexus/src/app/sagas/instance_create.rs index 10c4e1ec30..c8680701b1 100644 --- a/nexus/src/app/sagas/instance_create.rs +++ b/nexus/src/app/sagas/instance_create.rs @@ -100,6 +100,10 @@ declare_saga_actions! { + sic_attach_disk_to_instance - sic_attach_disk_to_instance_undo } + SET_BOOT_DISK -> "set_boot_disk" { + + sic_set_boot_disk + - sic_set_boot_disk_undo + } MOVE_TO_STOPPED -> "stopped_instance" { + sic_move_to_stopped } @@ -244,9 +248,19 @@ impl NexusSaga for SagaInstanceCreate { )?; } + // Build an iterator of all InstanceDiskAttachment entries in the + // request; these could either be a boot disk or data disks. As far as + // create/attach is concerned, they're all disks and all need to be + // processed just the same. + let all_disks = params + .create_params + .boot_disk + .iter() + .chain(params.create_params.disks.iter()); + // Appends the disk create saga as a subsaga directly to the instance // create builder. - for (i, disk) in params.create_params.disks.iter().enumerate() { + for (i, disk) in all_disks.clone().enumerate() { if let InstanceDiskAttachment::Create(create_disk) = disk { let subsaga_name = SagaName::new(&format!("instance-create-disk-{i}")); @@ -268,7 +282,7 @@ impl NexusSaga for SagaInstanceCreate { // Attaches all disks included in the instance create request, including // those which were previously created by the disk create subsagas. 
- for (i, disk_attach) in params.create_params.disks.iter().enumerate() { + for (i, disk_attach) in all_disks.enumerate() { let subsaga_name = SagaName::new(&format!("instance-attach-disk-{i}")); let mut subsaga_builder = DagBuilder::new(subsaga_name); @@ -292,6 +306,7 @@ impl NexusSaga for SagaInstanceCreate { )?; } + builder.append(set_boot_disk_action()); builder.append(move_to_stopped_action()); Ok(builder.build()?) } @@ -1021,6 +1036,90 @@ async fn sic_delete_instance_record( Ok(()) } +// This is done intentionally late in instance creation: +// * if the boot disk is provided by name and that disk is created along with +// the instance, there would not have been an ID to use any earlier +// * if the boot disk is pre-existing, we still must wait for `disk-attach` +// subsagas to complete; attempting to set the boot disk earlier would error +// out because the desired boot disk is not attached. +/// Set the instance's boot disk, if one was specified. +async fn sic_set_boot_disk( + sagactx: NexusActionContext, +) -> Result<(), ActionError> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::<Params>()?; + let datastore = osagactx.datastore(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + &params.serialized_authn, + ); + + // TODO: instead of taking this from create_params, if this is a name, take + // it from the ID we get when creating the named disk. + let Some(boot_disk) = + params.create_params.boot_disk.as_ref().map(|x| x.name()) + else { + return Ok(()); + }; + + let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?; + + let (.., authz_instance) = LookupPath::new(&opctx, &datastore) + .instance_id(instance_id.into_untyped_uuid()) + .lookup_for(authz::Action::Modify) + .await + .map_err(ActionError::action_failed)?; + + let (.., authz_disk) = LookupPath::new(&opctx, datastore) + .project_id(params.project_id) + .disk_name_owned(boot_disk.into()) + .lookup_for(authz::Action::Read) + .await + .map_err(ActionError::action_failed)?; + + let initial_configuration = + nexus_db_model::InstanceUpdate { boot_disk_id: Some(authz_disk.id()) }; + + datastore + .instance_reconfigure(&opctx, &authz_instance, initial_configuration) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + +async fn sic_set_boot_disk_undo( + sagactx: NexusActionContext, +) -> Result<(), anyhow::Error> { + let osagactx = sagactx.user_data(); + let params = sagactx.saga_params::<Params>()?; + let datastore = osagactx.datastore(); + let opctx = crate::context::op_context_for_saga_action( + &sagactx, + &params.serialized_authn, + ); + + let instance_id = sagactx.lookup::<InstanceUuid>("instance_id")?; + + let (.., authz_instance) = LookupPath::new(&opctx, &datastore) + .instance_id(instance_id.into_untyped_uuid()) + .lookup_for(authz::Action::Modify) + .await + .map_err(ActionError::action_failed)?; + + // If there was a boot disk, clear it. If there was not a boot disk, + // this is a no-op.
+ let undo_configuration = + nexus_db_model::InstanceUpdate { boot_disk_id: None }; + + datastore + .instance_reconfigure(&opctx, &authz_instance, undo_configuration) + .await + .map_err(ActionError::action_failed)?; + + Ok(()) +} + async fn sic_move_to_stopped( sagactx: NexusActionContext, ) -> Result<(), ActionError> { @@ -1120,11 +1219,12 @@ pub mod test { external_ips: vec![params::ExternalIpCreate::Ephemeral { pool: None, }], - disks: vec![params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { name: DISK_NAME.parse().unwrap(), }, )], + boot_disk: Some(params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { name: DISK_NAME.parse().unwrap(), }, )), + disks: Vec::new(), start: false, auto_restart_policy: Default::default(), }, diff --git a/nexus/src/app/sagas/instance_delete.rs b/nexus/src/app/sagas/instance_delete.rs index 5c45b6cfdc..c1ff9c0bfb 100644 --- a/nexus/src/app/sagas/instance_delete.rs +++ b/nexus/src/app/sagas/instance_delete.rs @@ -245,9 +245,10 @@ mod test { external_ips: vec![params::ExternalIpCreate::Ephemeral { pool: None, }], - disks: vec![params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { name: DISK_NAME.parse().unwrap() }, )], + boot_disk: Some(params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { name: DISK_NAME.parse().unwrap() }, )), + disks: Vec::new(), start: false, auto_restart_policy: Default::default(), } diff --git a/nexus/src/app/sagas/instance_migrate.rs b/nexus/src/app/sagas/instance_migrate.rs index e17d7f166d..2ddbe5b05c 100644 --- a/nexus/src/app/sagas/instance_migrate.rs +++ b/nexus/src/app/sagas/instance_migrate.rs @@ -617,6 +617,7 @@ mod tests { params::InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }, diff --git a/nexus/src/app/sagas/instance_start.rs b/nexus/src/app/sagas/instance_start.rs index b92533cd68..b10328f371 100644 --- a/nexus/src/app/sagas/instance_start.rs +++ b/nexus/src/app/sagas/instance_start.rs @@ -843,6 +843,7 @@ mod test { params::InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, start: false, auto_restart_policy: Default::default(), }, diff --git a/nexus/src/app/sagas/instance_update/mod.rs b/nexus/src/app/sagas/instance_update/mod.rs index f0610c04c8..bfe7692738 100644 --- a/nexus/src/app/sagas/instance_update/mod.rs +++ b/nexus/src/app/sagas/instance_update/mod.rs @@ -1551,6 +1551,7 @@ mod test { params::InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }, diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index b0a2633f69..b47d036d9c 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -2109,6 +2109,11 @@ mod test { disks_to_attach: Vec<params::InstanceDiskAttachment>, ) -> InstanceAndActiveVmm { let instances_url = format!("/v1/instances?project={}", PROJECT_NAME,); + + let mut disks_iter = disks_to_attach.into_iter(); + let boot_disk = disks_iter.next(); + let data_disks: Vec<params::InstanceDiskAttachment> = disks_iter.collect(); + let instance: Instance = object_create( client, &instances_url, @@ -2126,7 +2131,8 @@ mod test { ssh_public_keys: Some(Vec::new()), network_interfaces: params::InstanceNetworkInterfaceAttachment::None, - disks: disks_to_attach, + boot_disk, + disks: data_disks, external_ips: vec![], start: true, auto_restart_policy: Default::default(), diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index bc128d9278..33667a3da0 100644 ---
a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -2143,6 +2143,42 @@ impl NexusExternalApi for NexusExternalApiImpl { .await } + async fn instance_update( + rqctx: RequestContext<ApiContext>, + query_params: Query<params::OptionalProjectSelector>, + path_params: Path<params::InstancePath>, + reconfigure_params: TypedBody<params::InstanceUpdate>, + ) -> Result<HttpResponseOk<Instance>, HttpError> { + let apictx = rqctx.context(); + let nexus = &apictx.context.nexus; + let path = path_params.into_inner(); + let query = query_params.into_inner(); + let reconfigure_params = reconfigure_params.into_inner(); + let instance_selector = params::InstanceSelector { + project: query.project, + instance: path.instance, + }; + let handler = async { + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let instance_lookup = + nexus.instance_lookup(&opctx, instance_selector)?; + let instance = nexus + .instance_reconfigure( + &opctx, + &instance_lookup, + &reconfigure_params, + ) + .await?; + Ok(HttpResponseOk(instance.into())) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + async fn instance_reboot( rqctx: RequestContext<ApiContext>, query_params: Query<params::OptionalProjectSelector>, diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index fad04faaed..8b1d0e5f6b 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -523,6 +523,7 @@ pub async fn create_instance_with( auto_restart_policy: Option<params::InstanceAutoRestartPolicy>, ) -> Instance { let url = format!("/v1/instances?project={}", project_name); + object_create( client, &url, @@ -541,6 +542,7 @@ pub async fn create_instance_with( network_interfaces: nics.clone(), external_ips, disks, + boot_disk: None, start, auto_restart_policy, }, diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 8d271839f3..ff4c5a8712 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -429,9 +429,12 @@ pub static DEMO_INSTANCE_CREATE: Lazy<params::InstanceCreate> = pool: Some(DEMO_IP_POOL_NAME.clone().into()), }], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }); +pub static DEMO_INSTANCE_UPDATE: Lazy<params::InstanceUpdate> = + Lazy::new(|| params::InstanceUpdate { boot_disk: None }); // The instance needs a network interface, too.
pub static DEMO_INSTANCE_NIC_NAME: Lazy<Name> = @@ -1797,6 +1800,9 @@ pub static VERIFY_ENDPOINTS: Lazy<Vec<VerifyEndpoint>> = Lazy::new(|| { allowed_methods: vec![ AllowedMethod::Get, AllowedMethod::Delete, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_INSTANCE_UPDATE).unwrap() + ), ], }, diff --git a/nexus/tests/integration_tests/external_ips.rs b/nexus/tests/integration_tests/external_ips.rs index c7e593e6a1..15fcadb937 100644 --- a/nexus/tests/integration_tests/external_ips.rs +++ b/nexus/tests/integration_tests/external_ips.rs @@ -1000,6 +1000,7 @@ async fn test_floating_ip_attach_fail_between_projects( floating_ip: fip.identity.id.into(), }], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }, diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 9c8900b2d2..6a94a189bd 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -227,6 +227,7 @@ async fn test_create_instance_with_bad_hostname_impl( network_interfaces: Default::default(), external_ips: vec![], disks: vec![], + boot_disk: None, start: false, ssh_public_keys: None, auto_restart_policy: Default::default(), @@ -333,6 +334,7 @@ async fn test_instances_create_reboot_halt( params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), })) @@ -1922,6 +1924,7 @@ async fn test_instances_create_stopped_start( params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], disks: vec![], + boot_disk: None, start: false, auto_restart_policy: Default::default(), }, @@ -2102,6 +2105,7 @@ async fn test_instance_using_image_from_other_project_fails( size: ByteCount::from_gibibytes_u32(4), }, )], + boot_disk: None, start: true, auto_restart_policy: Default::default(), })) @@ -2166,6 +2170,7 @@ async fn test_instance_create_saga_removes_instance_database_record( network_interfaces: interface_params.clone(), external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; @@ -2195,6 +2200,7 @@ async fn test_instance_create_saga_removes_instance_database_record( network_interfaces: interface_params, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; @@ -2285,6 +2291,7 @@ async fn test_instance_with_single_explicit_ip_address( network_interfaces: interface_params, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), @@ -2402,6 +2409,7 @@ async fn test_instance_with_new_custom_network_interfaces( network_interfaces: interface_params, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; @@ -2518,6 +2526,7 @@ async fn test_instance_create_delete_network_interface( network_interfaces: params::InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; @@ -2763,6 +2772,7 @@ async fn test_instance_update_network_interfaces( network_interfaces: params::InstanceNetworkInterfaceAttachment::None, external_ips: vec![], disks: vec![], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; @@ -3163,6 +3173,7 @@ async fn test_instance_with_multiple_nics_unwinds_completely( network_interfaces: interface_params, external_ips: vec![], disks: vec![], + boot_disk: None, start: true,
auto_restart_policy: Default::default(), }; @@ -3216,6 +3227,8 @@ async fn test_attach_one_disk_to_instance(cptestctx: &ControlPlaneTestContext) { assert_eq!(disks.len(), 1); assert_eq!(disks[0].state, DiskState::Detached); + let disk_name = Name::try_from(String::from("probablydata")).unwrap(); + // Create the instance let instance_params = params::InstanceCreate { identity: IdentityMetadataCreateParams { @@ -3229,11 +3242,10 @@ async fn test_attach_one_disk_to_instance(cptestctx: &ControlPlaneTestContext) { ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - disks: vec![params::InstanceDiskAttachment::Attach( - params::InstanceDiskAttach { - name: Name::try_from(String::from("probablydata")).unwrap(), - }, - )], + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { name: disk_name.clone() }, + )), + disks: Vec::new(), start: true, auto_restart_policy: Default::default(), }; @@ -3290,12 +3302,27 @@ async fn test_instance_create_attach_disks( ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], + boot_disk: Some(params::InstanceDiskAttachment::Create( + params::DiskCreate { + identity: IdentityMetadataCreateParams { + name: Name::try_from(String::from("created-disk")).unwrap(), + description: String::from( + "A boot disk that was created by instance create", + ), + }, + size: ByteCount::from_gibibytes_u32(4), + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + }, + )), disks: vec![ params::InstanceDiskAttachment::Create(params::DiskCreate { identity: IdentityMetadataCreateParams { - name: Name::try_from(String::from("created-disk")).unwrap(), + name: Name::try_from(String::from("created-disk2")) + .unwrap(), description: String::from( - "A disk that was created by instance create", + "A data disk that was created by instance create", ), }, size: ByteCount::from_gibibytes_u32(4), @@ -3305,7 +3332,7 @@ async fn test_instance_create_attach_disks( }), params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { - name: attachable_disk.identity.name, + name: attachable_disk.identity.name.clone(), }, ), ], @@ -3332,7 +3359,7 @@ async fn test_instance_create_attach_disks( .await .expect("failed to list disks") .all_items; - assert_eq!(disks.len(), 2); + assert_eq!(disks.len(), 3); for disk in disks { assert_eq!(disk.state, DiskState::Attached(instance.identity.id)); @@ -3406,6 +3433,7 @@ async fn test_instance_create_attach_disks_undo( params::InstanceDiskAttach { name: faulted_disk.identity.name }, ), ], + boot_disk: None, start: true, auto_restart_policy: Default::default(), }; @@ -3474,7 +3502,12 @@ async fn test_attach_eight_disks_to_instance( ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - disks: (0..8) + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from("probablydata0".to_string()).unwrap(), + }, + )), + disks: (1..8) .map(|i| { params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { @@ -3556,7 +3589,12 @@ async fn test_cannot_attach_nine_disks_to_instance( ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - disks: (0..9) + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: 
Name::try_from("probablydata0".to_string()).unwrap(), + }, + )), + disks: (1..9) .map(|i| { params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { @@ -3652,7 +3690,12 @@ async fn test_cannot_attach_faulted_disks(cptestctx: &ControlPlaneTestContext) { ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - disks: (0..8) + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from("probablydata0".to_string()).unwrap(), + }, + )), + disks: (1..8) .map(|i| { params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { @@ -3737,7 +3780,12 @@ async fn test_disks_detached_when_instance_destroyed( ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - disks: (0..8) + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from("probablydata0".to_string()).unwrap(), + }, + )), + disks: (1..8) .map(|i| { params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { @@ -3829,7 +3877,12 @@ async fn test_disks_detached_when_instance_destroyed( ssh_public_keys: None, network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, external_ips: vec![], - disks: (0..8) + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from("probablydata0".to_string()).unwrap(), + }, + )), + disks: (1..8) .map(|i| { params::InstanceDiskAttachment::Attach( params::InstanceDiskAttach { @@ -3867,6 +3920,529 @@ async fn test_disks_detached_when_instance_destroyed( } } +// Surprising but true: mentioning a disk multiple times for attachment is just +// fine. This means that having a disk in the boot_disk field and disks list +// will succeed as well. +// +// Test here to ensure we're not caught by surprise if this behavior is changed, +// rather than to assert that this is a specific desired behavior. 
+#[nexus_test] +async fn test_duplicate_disk_attach_requests_ok( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Test pre-reqs + DiskTest::new(&cptestctx).await; + create_project_and_pool(&client).await; + + create_disk(&client, PROJECT_NAME, "probablydata").await; + create_disk(&client, PROJECT_NAME, "alsodata").await; + + // Verify disks are there and currently detached + let disks: Vec<Disk> = + NexusRequest::iter_collection_authn(client, &get_disks_url(), "", None) + .await + .expect("failed to list disks") + .all_items; + assert_eq!(disks.len(), 2); + assert_eq!(disks[0].state, DiskState::Detached); + assert_eq!(disks[1].state, DiskState::Detached); + + // Create the instance with a duplicate disks entry + let instance_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: "nfs".parse().unwrap(), + description: String::from("probably serving data"), + }, + ncpus: InstanceCpuCount::try_from(2).unwrap(), + memory: ByteCount::from_gibibytes_u32(4), + hostname: "nfs".parse().unwrap(), + user_data: vec![], + ssh_public_keys: None, + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![], + disks: vec![ + params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from(String::from("probablydata")).unwrap(), + }, + ), + params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from(String::from("probablydata")).unwrap(), + }, + ), + ], + boot_disk: None, + start: true, + auto_restart_policy: Default::default(), + }; + + let builder = + RequestBuilder::new(client, http::Method::POST, &get_instances_url()) + .body(Some(&instance_params)) + .expect_status(Some(http::StatusCode::CREATED)); + let response = NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("can attempt to create instance"); + + let instance = response + .parsed_body::<Instance>() + .expect("Failed to parse response body"); + assert_eq!(instance.boot_disk_id, None); + + // Create the instance with a disk mentioned both as a data disk and a boot + // disk + let instance_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: "nfs2".parse().unwrap(), + description: String::from("probably serving data"), + }, + ncpus: InstanceCpuCount::try_from(2).unwrap(), + memory: ByteCount::from_gibibytes_u32(4), + hostname: "nfs2".parse().unwrap(), + user_data: vec![], + ssh_public_keys: None, + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![], + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from(String::from("alsodata")).unwrap(), + }, + )), + disks: vec![params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from(String::from("alsodata")).unwrap(), + }, + )], + start: true, + auto_restart_policy: Default::default(), + }; + + let builder = + RequestBuilder::new(client, http::Method::POST, &get_instances_url()) + .body(Some(&instance_params)) + .expect_status(Some(http::StatusCode::CREATED)); + let response = NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("can attempt to create instance"); + + let instance = response + .parsed_body::<Instance>() + .expect("Failed to parse response body"); + let expected_disk = + disks.iter().find(|d| d.identity.name.as_str() == "alsodata").unwrap(); + assert_eq!(instance.boot_disk_id,
Some(expected_disk.identity.id)); +} + +// Create an instance with a boot disk, try (and fail) to detach it, change the +// boot disk to something else, and then successfully detach the former boot +// disk. +#[nexus_test] +async fn test_cannot_detach_boot_disk(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + let instance_name = "nifs"; + + // Test pre-reqs + DiskTest::new(&cptestctx).await; + create_project_and_pool(&client).await; + + // Create the "probablydata0" disk + create_disk(&client, PROJECT_NAME, "probablydata0").await; + + // Create the instance + let instance_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: instance_name.parse().unwrap(), + description: String::from("probably serving data"), + }, + ncpus: InstanceCpuCount::try_from(2).unwrap(), + memory: ByteCount::from_gibibytes_u32(4), + hostname: "nfs".parse().unwrap(), + user_data: vec![], + ssh_public_keys: None, + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![], + boot_disk: Some(params::InstanceDiskAttachment::Attach( + params::InstanceDiskAttach { + name: Name::try_from(String::from("probablydata0")).unwrap(), + }, + )), + disks: Vec::new(), + start: false, + auto_restart_policy: Default::default(), + }; + + let builder = + RequestBuilder::new(client, http::Method::POST, &get_instances_url()) + .body(Some(&instance_params)) + .expect_status(Some(http::StatusCode::CREATED)); + let response = NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Expected instance creation to work!"); + + let instance = response.parsed_body::<Instance>().unwrap(); + + // Verify disk is attached to the instance + let url_instance_disks = + format!("/v1/instances/{}/disks", instance.identity.id); + let disks: Vec<Disk> = NexusRequest::iter_collection_authn( + client, + &url_instance_disks, + "", + None, + ) + .await + .expect("failed to list disks") + .all_items; + assert_eq!(disks.len(), 1); + assert_eq!(disks[0].state, DiskState::Attached(instance.identity.id)); + assert_eq!(instance.boot_disk_id, Some(disks[0].identity.id)); + + // Attempt to detach the instance's boot disk. This should fail. + let url_instance_detach_disk = + format!("/v1/instances/{}/disks/detach", instance.identity.id); + + let builder = RequestBuilder::new( + client, + http::Method::POST, + &url_instance_detach_disk, + ) + .body(Some(&params::DiskPath { disk: disks[0].identity.id.into() })) + .expect_status(Some(http::StatusCode::CONFLICT)); + let response = NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("can attempt to detach boot disk"); + + let err = response + .parsed_body::<HttpErrorResponseBody>() + .expect("Failed to parse error response body"); + assert_eq!(err.message, "boot disk cannot be detached"); + + // Change the instance's boot disk. + let url_instance_update = format!("/v1/instances/{}", instance.identity.id); + + let builder = + RequestBuilder::new(client, http::Method::PUT, &url_instance_update) + .body(Some(&params::InstanceUpdate { boot_disk: None })) + .expect_status(Some(http::StatusCode::OK)); + let response = NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("can attempt to reconfigure the instance"); + + let instance = response.parsed_body::<Instance>().unwrap(); + assert_eq!(instance.boot_disk_id, None); + + // Now try to detach `disks[0]` again. This should succeed.
+ let builder = RequestBuilder::new( + client, + http::Method::POST, + &url_instance_detach_disk, + ) + .body(Some(&params::DiskPath { disk: disks[0].identity.id.into() })) + .expect_status(Some(http::StatusCode::ACCEPTED)); + NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("can attempt to detach boot disk"); +} + +#[nexus_test] +async fn test_updating_running_instance_is_conflict( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + let instance_name = "immediately-running"; + + create_project_and_pool(&client).await; + + let instance_params = params::InstanceCreate { + identity: IdentityMetadataCreateParams { + name: Name::try_from(String::from(instance_name)).unwrap(), + description: String::from("instance to run and fail to update"), + }, + ncpus: InstanceCpuCount::try_from(2).unwrap(), + memory: ByteCount::from_gibibytes_u32(4), + hostname: "inst".parse().unwrap(), + user_data: vec![], + ssh_public_keys: None, + network_interfaces: params::InstanceNetworkInterfaceAttachment::Default, + external_ips: vec![], + disks: vec![], + boot_disk: None, + start: true, + auto_restart_policy: Default::default(), + }; + + let builder = + RequestBuilder::new(client, http::Method::POST, &get_instances_url()) + .body(Some(&instance_params)) + .expect_status(Some(http::StatusCode::CREATED)); + let response = NexusRequest::new(builder) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .expect("Expected instance creation to work!"); + + let instance = response.parsed_body::<Instance>().unwrap(); + let instance_id = InstanceUuid::from_untyped_uuid(instance.identity.id); + + // The instance is technically updatable in the brief window that it is + // `Creating`. Wait for it to leave `Creating` to make sure we're in a + // non-updatable state before trying to update.
+    let nexus = &cptestctx.server.server_context().nexus;
+    instance_simulate(nexus, &instance_id).await;
+    instance_wait_for_state(client, instance_id, InstanceState::Running).await;
+
+    let url_instance_update = format!("/v1/instances/{}", instance_id);
+
+    let builder =
+        RequestBuilder::new(client, http::Method::PUT, &url_instance_update)
+            .body(Some(&params::InstanceUpdate { boot_disk: None }))
+            .expect_status(Some(http::StatusCode::CONFLICT));
+
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("can attempt to reconfigure the instance");
+
+    let error = response.parsed_body::<HttpErrorResponseBody>().unwrap();
+    assert_eq!(error.message, "instance must be stopped to update");
+}
+
+#[nexus_test]
+async fn test_updating_missing_instance_is_not_found(
+    cptestctx: &ControlPlaneTestContext,
+) {
+    const UUID_THAT_DOESNT_EXIST: Uuid =
+        Uuid::from_u128(0x12341234_4321_8765_1234_432143214321);
+    let url_instance_update =
+        format!("/v1/instances/{}", UUID_THAT_DOESNT_EXIST);
+
+    let client = &cptestctx.external_client;
+
+    let builder =
+        RequestBuilder::new(client, http::Method::PUT, &url_instance_update)
+            .body(Some(&params::InstanceUpdate { boot_disk: None }))
+            .expect_status(Some(http::StatusCode::NOT_FOUND));
+
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("can attempt to reconfigure the instance");
+
+    let error = response.parsed_body::<HttpErrorResponseBody>().unwrap();
+    assert_eq!(
+        error.message,
+        format!("not found: instance with id \"{}\"", UUID_THAT_DOESNT_EXIST)
+    );
+}
+
+// Create an instance with boot disk set to one of its attached disks, then set
+// it to the other disk.
+#[nexus_test]
+async fn test_boot_disk_can_be_changed(cptestctx: &ControlPlaneTestContext) {
+    let client = &cptestctx.external_client;
+    let instance_name = "nifs";
+
+    // Test pre-reqs
+    DiskTest::new(&cptestctx).await;
+    create_project_and_pool(&client).await;
+
+    // Create the "probablydata" disks
+    create_disk(&client, PROJECT_NAME, "probablydata0").await;
+    create_disk(&client, PROJECT_NAME, "probablydata1").await;
+
+    // Verify disks are there and currently detached
+    let disks: Vec<Disk> =
+        NexusRequest::iter_collection_authn(client, &get_disks_url(), "", None)
+            .await
+            .expect("failed to list disks")
+            .all_items;
+    assert_eq!(disks.len(), 2);
+    assert_eq!(disks[0].state, DiskState::Detached);
+    assert_eq!(disks[1].state, DiskState::Detached);
+
+    // Create the instance
+    let instance_params = params::InstanceCreate {
+        identity: IdentityMetadataCreateParams {
+            name: instance_name.parse().unwrap(),
+            description: String::from("probably serving data"),
+        },
+        ncpus: InstanceCpuCount::try_from(2).unwrap(),
+        memory: ByteCount::from_gibibytes_u32(4),
+        hostname: "nfs".parse().unwrap(),
+        user_data: vec![],
+        ssh_public_keys: None,
+        network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
+        external_ips: vec![],
+        boot_disk: Some(params::InstanceDiskAttachment::Attach(
+            params::InstanceDiskAttach {
+                name: Name::try_from(String::from("probablydata0")).unwrap(),
+            },
+        )),
+        disks: vec![params::InstanceDiskAttachment::Attach(
+            params::InstanceDiskAttach {
+                name: Name::try_from(String::from("probablydata1")).unwrap(),
+            },
+        )],
+        start: false,
+        auto_restart_policy: Default::default(),
+    };
+
+    let builder =
+        RequestBuilder::new(client, http::Method::POST, &get_instances_url())
+            .body(Some(&instance_params))
+            .expect_status(Some(http::StatusCode::CREATED));
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("Expected instance creation to work!");
+
+    let instance = response.parsed_body::<Instance>().unwrap();
+
+    assert_eq!(instance.boot_disk_id, Some(disks[0].identity.id));
+
+    // Change the instance's boot disk.
+    let url_instance_update = format!("/v1/instances/{}", instance.identity.id);
+
+    let builder =
+        RequestBuilder::new(client, http::Method::PUT, &url_instance_update)
+            .body(Some(&params::InstanceUpdate {
+                boot_disk: Some(disks[1].identity.id.into()),
+            }))
+            .expect_status(Some(http::StatusCode::OK));
+
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("can attempt to reconfigure the instance");
+
+    let instance = response.parsed_body::<Instance>().unwrap();
+    assert_eq!(instance.boot_disk_id, Some(disks[1].identity.id));
+}
+
+// Create an instance without a boot disk, fail to set the boot disk to a
+// detached disk, then attach the disk and make it a boot disk.
+#[nexus_test]
+async fn test_boot_disk_must_be_attached(cptestctx: &ControlPlaneTestContext) {
+    let client = &cptestctx.external_client;
+    let instance_name = "nifs";
+
+    // Test pre-reqs
+    DiskTest::new(&cptestctx).await;
+    create_project_and_pool(&client).await;
+
+    // Create the "probablydata" disk
+    create_disk(&client, PROJECT_NAME, "probablydata0").await;
+
+    let disks: Vec<Disk> =
+        NexusRequest::iter_collection_authn(client, &get_disks_url(), "", None)
+            .await
+            .expect("failed to list disks")
+            .all_items;
+
+    // Create the instance
+    let instance_params = params::InstanceCreate {
+        identity: IdentityMetadataCreateParams {
+            name: instance_name.parse().unwrap(),
+            description: String::from("probably serving data"),
+        },
+        ncpus: InstanceCpuCount::try_from(2).unwrap(),
+        memory: ByteCount::from_gibibytes_u32(4),
+        hostname: "nfs".parse().unwrap(),
+        user_data: vec![],
+        ssh_public_keys: None,
+        network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
+        external_ips: vec![],
+        disks: vec![],
+        boot_disk: None,
+        start: false,
+        auto_restart_policy: Default::default(),
+    };
+
+    let builder =
+        RequestBuilder::new(client, http::Method::POST, &get_instances_url())
+            .body(Some(&instance_params))
+            .expect_status(Some(http::StatusCode::CREATED));
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("Expected instance creation to work!");
+
+    let instance = response.parsed_body::<Instance>().unwrap();
+
+    // Update the instance's boot disk to the unattached disk. This should
+    // fail.
+    let url_instance_update = format!("/v1/instances/{}", instance.identity.id);
+
+    let builder =
+        RequestBuilder::new(client, http::Method::PUT, &url_instance_update)
+            .body(Some(&params::InstanceUpdate {
+                boot_disk: Some(disks[0].identity.id.into()),
+            }))
+            .expect_status(Some(http::StatusCode::CONFLICT));
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("can attempt to reconfigure the instance");
+
+    let error = response.parsed_body::<HttpErrorResponseBody>().unwrap();
+
+    assert_eq!(error.message, "boot disk must be attached");
+
+    // Now attach the disk.
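+    // The attach endpoint takes the same `DiskPath` body as detach; once the
+    // disk is attached, it becomes a valid target for `boot_disk`.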
+    let url_instance_attach_disk =
+        format!("/v1/instances/{}/disks/attach", instance.identity.id);
+
+    let builder = RequestBuilder::new(
+        client,
+        http::Method::POST,
+        &url_instance_attach_disk,
+    )
+    .body(Some(&params::DiskPath { disk: disks[0].identity.id.into() }))
+    .expect_status(Some(http::StatusCode::ACCEPTED));
+    NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("can attach the disk");
+
+    // And now it can be made the boot disk.
+    let builder =
+        RequestBuilder::new(client, http::Method::PUT, &url_instance_update)
+            .body(Some(&params::InstanceUpdate {
+                boot_disk: Some(disks[0].identity.id.into()),
+            }))
+            .expect_status(Some(http::StatusCode::OK));
+    let response = NexusRequest::new(builder)
+        .authn_as(AuthnMode::PrivilegedUser)
+        .execute()
+        .await
+        .expect("can attempt to reconfigure the instance");
+
+    let instance = response.parsed_body::<Instance>().unwrap();
+    assert_eq!(instance.boot_disk_id, Some(disks[0].identity.id));
+}
+
 // Tests that an instance is rejected if the memory is less than
 // MIN_MEMORY_BYTES_PER_INSTANCE
 #[nexus_test]
@@ -3893,6 +4469,7 @@ async fn test_instances_memory_rejected_less_than_min_memory_size(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -3944,6 +4521,7 @@ async fn test_instances_memory_not_divisible_by_min_memory_size(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -3995,6 +4573,7 @@ async fn test_instances_memory_greater_than_max_size(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -4079,6 +4658,7 @@ async fn test_instance_create_with_ssh_keys(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         auto_restart_policy: Default::default(),
     };
@@ -4126,6 +4706,7 @@ async fn test_instance_create_with_ssh_keys(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         auto_restart_policy: Default::default(),
     };
@@ -4172,6 +4753,7 @@ async fn test_instance_create_with_ssh_keys(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         auto_restart_policy: Default::default(),
     };
@@ -4293,6 +4875,7 @@ async fn test_cannot_provision_instance_beyond_cpu_capacity(
             params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: false,
         auto_restart_policy: Default::default(),
     };
@@ -4350,6 +4933,7 @@ async fn test_cannot_provision_instance_beyond_cpu_limit(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: false,
         auto_restart_policy: Default::default(),
     };
@@ -4404,6 +4988,7 @@ async fn test_cannot_provision_instance_beyond_ram_capacity(
             params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: false,
         auto_restart_policy: Default::default(),
     };
@@ -4703,6 +5288,7 @@ async fn test_instance_ephemeral_ip_from_correct_pool(
         }],
         ssh_public_keys: None,
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -4771,6 +5357,7 @@ async fn test_instance_ephemeral_ip_from_orphan_pool(
         }],
         ssh_public_keys: None,
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -4833,6 +5420,7 @@ async fn test_instance_ephemeral_ip_no_default_pool_error(
         }],
         ssh_public_keys: None,
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -4969,6 +5557,7 @@ async fn test_instance_allow_only_one_ephemeral_ip(
         network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
         external_ips: vec![ephemeral_create.clone(), ephemeral_create],
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
@@ -5101,6 +5690,7 @@ async fn test_instance_create_in_silo(cptestctx: &ControlPlaneTestContext) {
             pool: Some("default".parse::<Name>().unwrap().into()),
         }],
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
diff --git a/nexus/tests/integration_tests/projects.rs b/nexus/tests/integration_tests/projects.rs
index 6f9aca4a6e..d9752b1949 100644
--- a/nexus/tests/integration_tests/projects.rs
+++ b/nexus/tests/integration_tests/projects.rs
@@ -162,6 +162,7 @@ async fn test_project_deletion_with_instance(
                 params::InstanceNetworkInterfaceAttachment::None,
             external_ips: vec![],
             disks: vec![],
+            boot_disk: None,
             start: false,
             auto_restart_policy: Default::default(),
         },
diff --git a/nexus/tests/integration_tests/quotas.rs b/nexus/tests/integration_tests/quotas.rs
index 4ebf8e903e..b7c6c0e53f 100644
--- a/nexus/tests/integration_tests/quotas.rs
+++ b/nexus/tests/integration_tests/quotas.rs
@@ -86,6 +86,7 @@ impl ResourceAllocator {
                 network_interfaces: params::InstanceNetworkInterfaceAttachment::Default,
                 external_ips: Vec::<params::ExternalIpCreate>::new(),
                 disks: Vec::<params::InstanceDiskAttachment>::new(),
+                boot_disk: None,
                 start: false,
                 auto_restart_policy: Default::default(),
             },
diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs
index 6d4cb9e368..bda07be057 100644
--- a/nexus/tests/integration_tests/schema.rs
+++ b/nexus/tests/integration_tests/schema.rs
@@ -1069,6 +1069,18 @@ const INSTANCE4: Uuid = Uuid::from_u128(0x44441257_5c3d_4647_83b0_8f3515da7be1);
 // "67060115" -> "Prop"olis
 const PROPOLIS: Uuid = Uuid::from_u128(0x11116706_5c3d_4647_83b0_8f3515da7be1);
+// "7154" -> "Disk"
+const DISK1: Uuid = Uuid::from_u128(0x11117154_5c3d_4647_83b0_8f3515da7be1);
+const DISK2: Uuid = Uuid::from_u128(0x22227154_5c3d_4647_83b0_8f3515da7be1);
+const DISK3: Uuid = Uuid::from_u128(0x33337154_5c3d_4647_83b0_8f3515da7be1);
+const DISK4: Uuid = Uuid::from_u128(0x44447154_5c3d_4647_83b0_8f3515da7be1);
+
+// "566F" -> "Vo"lume. V is difficult, OK?
+const VOLUME1: Uuid = Uuid::from_u128(0x1111566f_5c3d_4647_83b0_8f3515da7be1);
+const VOLUME2: Uuid = Uuid::from_u128(0x2222566f_5c3d_4647_83b0_8f3515da7be1);
+const VOLUME3: Uuid = Uuid::from_u128(0x3333566f_5c3d_4647_83b0_8f3515da7be1);
+const VOLUME4: Uuid = Uuid::from_u128(0x4444566f_5c3d_4647_83b0_8f3515da7be1);
+
 fn before_23_0_0(client: &Client) -> BoxFuture<'_, ()> {
     Box::pin(async move {
         // Create two silos
@@ -1461,6 +1473,98 @@ fn after_101_0_0(client: &Client) -> BoxFuture<'_, ()> {
         );
     })
 }
+
+fn before_107_0_0(client: &Client) -> BoxFuture<'_, ()> {
+    Box::pin(async {
+        // An instance with no attached disks (INSTANCE4) gets a NULL boot
+        // disk. An instance with exactly one attached disk (INSTANCE1,
+        // INSTANCE2) gets that disk as its boot disk. An instance with two
+        // attached disks (INSTANCE3) gets a NULL boot disk.
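+        //
+        // Below, DISK1 attaches to INSTANCE1, DISK2 to INSTANCE2, and DISK3
+        // and DISK4 both attach to INSTANCE3; INSTANCE4 gets no disks at all.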
+        client
+            .batch_execute(&format!(
+                "
+        INSERT INTO disk (
+            id, name, description, time_created,
+            time_modified, time_deleted, rcgen, project_id,
+            volume_id, disk_state, attach_instance_id, state_generation,
+            slot, time_state_updated, size_bytes, block_size,
+            origin_snapshot, origin_image, pantry_address
+        ) VALUES
+
+        ('{DISK1}', 'disk1', '', now(),
+         now(), NULL, 1, '{PROJECT}',
+         '{VOLUME1}', 'attached', '{INSTANCE1}', 1,
+         4, now(), 65536, '512',
+         NULL, NULL, NULL),
+        ('{DISK2}', 'disk2', '', now(),
+         now(), NULL, 1, '{PROJECT}',
+         '{VOLUME2}', 'attached', '{INSTANCE2}', 1,
+         4, now(), 65536, '512',
+         NULL, NULL, NULL),
+        ('{DISK3}', 'disk3', '', now(),
+         now(), NULL, 1, '{PROJECT}',
+         '{VOLUME3}', 'attached', '{INSTANCE3}', 1,
+         4, now(), 65536, '512',
+         NULL, NULL, NULL),
+        ('{DISK4}', 'disk4', '', now(),
+         now(), NULL, 1, '{PROJECT}',
+         '{VOLUME4}', 'attached', '{INSTANCE3}', 1,
+         4, now(), 65536, '512',
+         NULL, NULL, NULL);"
+            ))
+            .await
+            .expect("failed to create disks");
+    })
+}
+
+fn after_107_0_0(client: &Client) -> BoxFuture<'_, ()> {
+    Box::pin(async {
+        let rows = client
+            .query("SELECT id, boot_disk_id FROM instance ORDER BY id;", &[])
+            .await
+            .expect("failed to load instance boot disks");
+        let boot_disks = process_rows(&rows);
+
+        assert_eq!(
+            boot_disks[0].values,
+            vec![
+                ColumnValue::new("id", INSTANCE1),
+                ColumnValue::new("boot_disk_id", DISK1),
+            ],
+            "instance {INSTANCE1} should have one attached disk that has been \
+            made the boot disk"
+        );
+
+        assert_eq!(
+            boot_disks[1].values,
+            vec![
+                ColumnValue::new("id", INSTANCE2),
+                ColumnValue::new("boot_disk_id", DISK2),
+            ],
+            "instance {INSTANCE2} should have a different attached disk that \
+            has been made the boot disk"
+        );
+
+        assert_eq!(
+            boot_disks[2].values,
+            vec![
+                ColumnValue::new("id", INSTANCE3),
+                ColumnValue::null("boot_disk_id"),
+            ],
+            "instance {INSTANCE3} should have two attached disks, neither of \
+            which is the boot disk"
+        );
+
+        assert_eq!(
+            boot_disks[3].values,
+            vec![
+                ColumnValue::new("id", INSTANCE4),
+                ColumnValue::null("boot_disk_id"),
+            ],
+            "instance {INSTANCE4} should have no attached disks, so \
+            no boot disk"
+        );
+    })
+}
 // Lazily initializes all migration checks. The combination of Rust function
 // pointers and async makes defining a static table fairly painful, so we're
 // using lazy initialization instead.
@@ -1496,6 +1600,11 @@ fn get_migration_checks() -> BTreeMap<SemverVersion, DataMigrationFns> {
         DataMigrationFns { before: Some(before_101_0_0), after: after_101_0_0 },
     );
+    map.insert(
+        SemverVersion(semver::Version::parse("107.0.0").unwrap()),
+        DataMigrationFns { before: Some(before_107_0_0), after: after_107_0_0 },
+    );
+
     map
 }
diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs
index 3fa5c16f95..7543a74597 100644
--- a/nexus/tests/integration_tests/snapshots.rs
+++ b/nexus/tests/integration_tests/snapshots.rs
@@ -139,9 +139,10 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) {
         ssh_public_keys: Some(Vec::new()),
         network_interfaces:
             params::InstanceNetworkInterfaceAttachment::None,
-        disks: vec![params::InstanceDiskAttachment::Attach(
+        boot_disk: Some(params::InstanceDiskAttachment::Attach(
             params::InstanceDiskAttach { name: base_disk_name.clone() },
-        )],
+        )),
+        disks: Vec::new(),
         external_ips: vec![],
         start: true,
         auto_restart_policy: Default::default(),
@@ -343,9 +344,10 @@ async fn test_snapshot_stopped_instance(cptestctx: &ControlPlaneTestContext) {
         ssh_public_keys: Some(Vec::new()),
         network_interfaces:
             params::InstanceNetworkInterfaceAttachment::None,
-        disks: vec![params::InstanceDiskAttachment::Attach(
+        boot_disk: Some(params::InstanceDiskAttachment::Attach(
             params::InstanceDiskAttach { name: base_disk_name.clone() },
-        )],
+        )),
+        disks: Vec::new(),
         external_ips: vec![],
         start: false,
         auto_restart_policy: Default::default(),
diff --git a/nexus/tests/integration_tests/subnet_allocation.rs b/nexus/tests/integration_tests/subnet_allocation.rs
index c60284c6e2..f504d77180 100644
--- a/nexus/tests/integration_tests/subnet_allocation.rs
+++ b/nexus/tests/integration_tests/subnet_allocation.rs
@@ -62,6 +62,7 @@ async fn create_instance_expect_failure(
         network_interfaces,
         external_ips: vec![],
         disks: vec![],
+        boot_disk: None,
         start: true,
         auto_restart_policy: Default::default(),
     };
diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs
index 0cd370d425..8f0abd3d46 100644
--- a/nexus/types/src/external_api/params.rs
+++ b/nexus/types/src/external_api/params.rs
@@ -946,6 +946,16 @@ pub enum InstanceDiskAttachment {
     Attach(InstanceDiskAttach),
 }
+impl InstanceDiskAttachment {
+    /// Get the name of the disk described by this attachment.
+    pub fn name(&self) -> Name {
+        match self {
+            Self::Create(create) => create.identity.name.clone(),
+            Self::Attach(InstanceDiskAttach { name }) => name.clone(),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
 pub struct InstanceDiskAttach {
     /// A disk name to attach
@@ -1022,6 +1032,22 @@ pub struct InstanceCreate {
     #[serde(default)]
     pub disks: Vec<InstanceDiskAttachment>,
+    /// The disk this instance should boot into. This disk can either be
+    /// attached if it already exists, or created, if it should be a new disk.
+    ///
+    /// It is strongly recommended to either provide a boot disk at instance
+    /// creation, or update the instance after creation to set a boot disk.
+    ///
+    /// An instance without an explicit boot disk can still boot: the options
+    /// are then managed by UEFI and controlled by the guest OS, but with some
+    /// risk. If this instance later has a disk attached or detached, it is
+    /// possible that boot options can end up reordered, with the intended boot
+    /// disk moved after the EFI shell in boot priority. This may result in an
+    /// instance that only boots to the EFI shell until the desired disk is
+    /// set as an explicit boot disk and the instance rebooted.
+    #[serde(default)]
+    pub boot_disk: Option<InstanceDiskAttachment>,
+
     /// An allowlist of SSH public keys to be transferred to the instance via
     /// cloud-init during instance creation.
     ///
@@ -1043,6 +1069,15 @@ pub struct InstanceCreate {
     pub auto_restart_policy: Option<InstanceAutoRestartPolicy>,
 }
+/// Parameters of an `Instance` that can be reconfigured after creation.
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
+pub struct InstanceUpdate {
+    /// Name or ID of the disk the instance should be instructed to boot from.
+    ///
+    /// If not provided, unset the instance's boot disk.
+    pub boot_disk: Option<NameOrId>,
+}
+
 #[inline]
 fn bool_true() -> bool {
     true
diff --git a/openapi/nexus-internal.json b/openapi/nexus-internal.json
index f73c2d9bea..9e05ff72f9 100644
--- a/openapi/nexus-internal.json
+++ b/openapi/nexus-internal.json
@@ -3305,6 +3305,12 @@
         "description": "`true` if this instance's auto-restart policy will permit the control plane to automatically restart it if it enters the `Failed` state.",
         "type": "boolean"
       },
+      "boot_disk_id": {
+        "nullable": true,
+        "description": "the ID of the disk used to boot this Instance, if a specific one is assigned.",
+        "type": "string",
+        "format": "uuid"
+      },
       "description": {
         "description": "human-readable free-form text about a resource",
         "type": "string"
diff --git a/openapi/nexus.json b/openapi/nexus.json
index 86279ec4e4..74333bef82 100644
--- a/openapi/nexus.json
+++ b/openapi/nexus.json
@@ -1913,6 +1913,60 @@
         }
       }
     },
+    "put": {
+      "tags": [
+        "instances"
+      ],
+      "summary": "Update instance",
+      "operationId": "instance_update",
+      "parameters": [
+        {
+          "in": "query",
+          "name": "project",
+          "description": "Name or ID of the project",
+          "schema": {
+            "$ref": "#/components/schemas/NameOrId"
+          }
+        },
+        {
+          "in": "path",
+          "name": "instance",
+          "description": "Name or ID of the instance",
+          "required": true,
+          "schema": {
+            "$ref": "#/components/schemas/NameOrId"
+          }
+        }
+      ],
+      "requestBody": {
+        "content": {
+          "application/json": {
+            "schema": {
+              "$ref": "#/components/schemas/InstanceUpdate"
+            }
+          }
+        },
+        "required": true
+      },
+      "responses": {
+        "200": {
+          "description": "successful operation",
+          "content": {
+            "application/json": {
+              "schema": {
+                "$ref": "#/components/schemas/Instance"
+              }
+            }
+          }
+        },
+        "4XX": {
+          "$ref": "#/components/responses/Error"
+        },
+        "5XX": {
+          "$ref": "#/components/responses/Error"
+        }
+      }
+    },
     "delete": {
       "tags": [
         "instances"
@@ -15102,6 +15156,12 @@
         "description": "`true` if this instance's auto-restart policy will permit the control plane to automatically restart it if it enters the `Failed` state.",
         "type": "boolean"
       },
+      "boot_disk_id": {
+        "nullable": true,
+        "description": "the ID of the disk used to boot this Instance, if a specific one is assigned.",
+        "type": "string",
+        "format": "uuid"
+      },
       "description": {
         "description": "human-readable free-form text about a resource",
         "type": "string"
@@ -15222,6 +15282,16 @@
         }
       ]
     },
+    "boot_disk": {
+      "nullable": true,
+      "description": "The disk this instance should boot into. This disk can either be attached if it already exists, or created, if it should be a new disk.\n\nIt is strongly recommended to either provide a boot disk at instance creation, or update the instance after creation to set a boot disk.\n\nAn instance without an explicit boot disk can still boot: the options are then managed by UEFI and controlled by the guest OS, but with some risk. If this instance later has a disk attached or detached, it is possible that boot options can end up reordered, with the intended boot disk moved after the EFI shell in boot priority. This may result in an instance that only boots to the EFI shell until the desired disk is set as an explicit boot disk and the instance rebooted.",
+      "default": null,
+      "allOf": [
+        {
+          "$ref": "#/components/schemas/InstanceDiskAttachment"
+        }
+      ]
+    },
     "description": {
       "type": "string"
     },
@@ -15718,6 +15788,21 @@
         }
       ]
     },
+    "InstanceUpdate": {
+      "description": "Parameters of an `Instance` that can be reconfigured after creation.",
+      "type": "object",
+      "properties": {
+        "boot_disk": {
+          "nullable": true,
+          "description": "Name or ID of the disk the instance should be instructed to boot from.\n\nIf not provided, unset the instance's boot disk.",
+          "allOf": [
+            {
+              "$ref": "#/components/schemas/NameOrId"
+            }
+          ]
+        }
+      }
+    },
     "IpNet": {
       "x-rust-type": {
         "crate": "oxnet",
diff --git a/openapi/sled-agent.json b/openapi/sled-agent.json
index 734bcbf6d1..c6f029ea9a 100644
--- a/openapi/sled-agent.json
+++ b/openapi/sled-agent.json
@@ -1876,6 +1876,34 @@
         }
       ]
     },
+    "BootOrderEntry": {
+      "description": "An entry in a list of boot options.\n\n<details><summary>JSON schema</summary>\n\n```json { \"description\": \"An entry in a list of boot options.\", \"type\": \"object\", \"required\": [ \"name\" ], \"properties\": { \"name\": { \"description\": \"The name of the device to attempt booting from.\", \"type\": \"string\" } } } ```\n</details>",
", + "type": "object", + "properties": { + "name": { + "description": "The name of the device to attempt booting from.", + "type": "string" + } + }, + "required": [ + "name" + ] + }, + "BootSettings": { + "description": "BootSettings\n\n
+      "description": "BootSettings\n\n<details><summary>JSON schema</summary>\n\n```json { \"type\": \"object\", \"required\": [ \"order\" ], \"properties\": { \"order\": { \"type\": \"array\", \"items\": { \"$ref\": \"#/components/schemas/BootOrderEntry\" } } } } ```\n</details>",
", + "type": "object", + "properties": { + "order": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BootOrderEntry" + } + } + }, + "required": [ + "order" + ] + }, "BootstoreStatus": { "type": "object", "properties": { @@ -3219,6 +3247,14 @@ "description": "Describes the instance hardware.", "type": "object", "properties": { + "boot_settings": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/BootSettings" + } + ] + }, "cloud_init_bytes": { "nullable": true, "type": "string" diff --git a/package-manifest.toml b/package-manifest.toml index 0b31273988..9179212755 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -617,10 +617,10 @@ service_name = "propolis-server" only_for_targets.image = "standard" source.type = "prebuilt" source.repo = "propolis" -source.commit = "fae5334bcad5e864794332c6fed5e6bb9ec88831" +source.commit = "11371b0f3743f8df5b047dc0edc2699f4bdf3927" # The SHA256 digest is automatically posted to: # https://buildomat.eng.oxide.computer/public/file/oxidecomputer/propolis/image//propolis-server.sha256.txt -source.sha256 = "9701c9ef35950b13d69e7b6309c98b0c92f57b4262c7a536f046c909b2bedbd5" +source.sha256 = "07383cbad45bc032de1b65d3553839751fde96342cc76249ca4a45b89872aae9" output.type = "zone" [package.mg-ddm-gz] diff --git a/schema/crdb/add-instance-boot-disk/up01.sql b/schema/crdb/add-instance-boot-disk/up01.sql new file mode 100644 index 0000000000..5b970f09aa --- /dev/null +++ b/schema/crdb/add-instance-boot-disk/up01.sql @@ -0,0 +1 @@ +ALTER TABLE omicron.public.instance ADD COLUMN IF NOT EXISTS boot_disk_id UUID; diff --git a/schema/crdb/add-instance-boot-disk/up02.sql b/schema/crdb/add-instance-boot-disk/up02.sql new file mode 100644 index 0000000000..fa49a37a99 --- /dev/null +++ b/schema/crdb/add-instance-boot-disk/up02.sql @@ -0,0 +1,50 @@ +SET LOCAL disallow_full_table_scans = off; + +-- Pick `boot_disk_id` values for instances, when we know doing so will not +-- risk adverse impact to the instance. +-- +-- Boot orders, before the change motivating this migration, are firstly +-- defined by disk PCI device ordering. But that initial order is then +-- persisted in user-provided EFI system partitions (ESP)s on those disks, +-- where guest OS code or guest OS operators may further change it. So, to know +-- exactly which disks an instance would try to boot, today, we would need to +-- do something like: +-- * figure out which, if any, OVMF would find an ESP +-- * this is probably "walk disks in PCI device order", but I'm not sure! +-- * find that ESP's persisted BootOrder +-- * walk devices (potentially by partition) according that boot order +-- * find whichever disk would actually get booted +-- * set `boot_disk_id` to this disk. +-- +-- "find" and "figure out" are very load-bearing in the above, but answers +-- probably could be determined by careful reading of EDK2. +-- +-- We would have to take such care in picking a `boot_disk_id` in this +-- migration because once we start setting `boot_disk_id` on an instance, and +-- that instance is booted, OVMF will persist a *new* boot order with the +-- indicated disk up at the front. *this will blow away whatever boot order was +-- present in the guest's ESP, potentially removing entries that named the +-- intended boot option*. +-- +-- So, when an instance has multiple attached disks, exactly which is *really* +-- booted is approximately a black box. 
+-- PCI-numbered disk, but we can't be sure without the kind of careful
+-- inspection described above, which we don't know how to do. If we pick
+-- wrong, the instance might be rendered unbootable without user intervention.
+-- If we just don't pick, the instance was probably in a usable state and can
+-- continue to be used. The safer option is to just not guess at boot disks in
+-- these cases.
+--
+-- If the instance only has one disk attached though, that's the sole disk it
+-- would try booting, so just pick it here. That should be the majority of
+-- instances anyway.
+UPDATE instance SET boot_disk_id = d.id FROM (
+    -- "min(id)" is incredibly gross, but because of `HAVING COUNT(*) = 1`
+    -- below, this is the minimum of a single id. There just needs to be some
+    -- aggregate function used to select the non-aggregated `id` from the group.
+    SELECT attach_instance_id, min(id) as id
+    FROM disk
+    WHERE disk_state = 'attached' AND attach_instance_id IS NOT NULL
+    GROUP BY attach_instance_id
+    HAVING COUNT(*) = 1
+) d
+WHERE instance.id = d.attach_instance_id;
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index a8dd792922..a6cd9b38fe 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -1108,7 +1108,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.instance (
      * by the control plane.
      */
     auto_restart_policy omicron.public.instance_auto_restart,
-
     /*
      * The cooldown period that must elapse between consecutive auto restart
      * attempts. If this is NULL, no cooldown period is explicitly configured
@@ -1116,6 +1115,14 @@ CREATE TABLE IF NOT EXISTS omicron.public.instance (
      */
     auto_restart_cooldown INTERVAL,
+
+    /*
+     * Which disk, if any, is the one this instance should be directed to boot
+     * from. With a boot device selected, guest OSes cannot configure their
+     * boot policy for future boots, so NULL is also permitted, to indicate
+     * that a guest does not want our policy and should instead retain control
+     * over its boot-time fate.
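+     *
+     * (When this is NULL, Nexus passes no explicit boot settings down to the
+     * sled-agent and Propolis, so whatever boot order the guest's firmware
+     * has persisted remains in effect.)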
+     */
+    boot_disk_id UUID,
     CONSTRAINT vmm_iff_active_propolis CHECK (
         ((state = 'vmm') AND (active_propolis_id IS NOT NULL)) OR
@@ -4414,7 +4421,7 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    (TRUE, NOW(), NOW(), '106.0.0', NULL)
+    (TRUE, NOW(), NOW(), '107.0.0', NULL)
 ON CONFLICT DO NOTHING;

 COMMIT;
diff --git a/sled-agent/src/instance.rs b/sled-agent/src/instance.rs
index 071c70a497..3d8441e514 100644
--- a/sled-agent/src/instance.rs
+++ b/sled-agent/src/instance.rs
@@ -336,6 +336,7 @@ struct InstanceRunner {
     // Disk related properties
     requested_disks: Vec<propolis_client::types::DiskRequest>,
+    boot_settings: Option<propolis_client::types::BootSettings>,
     cloud_init_bytes: Option<NoDebug<String>>,

     // Internal State management
@@ -735,6 +736,7 @@ impl InstanceRunner {
                 .cloned()
                 .map(Into::into)
                 .collect(),
+            boot_settings: self.boot_settings.clone(),
             migrate,
             cloud_init_bytes: self.cloud_init_bytes.clone().map(|x| x.0),
         };
@@ -1105,6 +1107,7 @@ impl Instance {
             dhcp_config,
             requested_disks: hardware.disks,
             cloud_init_bytes: hardware.cloud_init_bytes,
+            boot_settings: hardware.boot_settings,
             state: InstanceStates::new(vmm_runtime, migration_id),
             running_state: None,
             nexus_client,
@@ -1836,6 +1839,7 @@ mod tests {
                 search_domains: vec![],
             },
             disks: vec![],
+            boot_settings: None,
             cloud_init_bytes: None,
         };
diff --git a/sled-agent/src/sim/sled_agent.rs b/sled-agent/src/sim/sled_agent.rs
index 1acaff44db..0652c021cb 100644
--- a/sled-agent/src/sim/sled_agent.rs
+++ b/sled-agent/src/sim/sled_agent.rs
@@ -348,6 +348,7 @@ impl SledAgent {
             properties,
             nics: vec![],
             disks: vec![],
+            boot_settings: None,
             migrate: None,
             cloud_init_bytes: None,
         };
diff --git a/sled-agent/types/src/instance.rs b/sled-agent/types/src/instance.rs
index 0f69db8bbb..39726030b0 100644
--- a/sled-agent/types/src/instance.rs
+++ b/sled-agent/types/src/instance.rs
@@ -61,6 +61,7 @@ pub struct InstanceHardware {
     pub dhcp_config: DhcpConfig,
     // TODO: replace `propolis_client::*` with locally-modeled request type
     pub disks: Vec<propolis_client::types::DiskRequest>,
+    pub boot_settings: Option<propolis_client::types::BootSettings>,
     pub cloud_init_bytes: Option<NoDebug<String>>,
 }