From 98bf71e423903df519a02f96102cad5f5cced70f Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Thu, 25 Sep 2025 01:20:25 +0000 Subject: [PATCH 1/7] [feat] Add IP pool multicast support This work introduces multicast IP pool capabilities to support external multicast traffic routing through the rack's switching infrastructure. Includes: - Add IpPoolType enum (unicast/multicast) with unicast as default - Add multicast pool fields: switch_port_uplinks (UUID[]), mvlan (VLAN ID) - Add database migration (multicast-pool-support/up01.sql) with new columns and indexes - Add ASM/SSM range validation for multicast pools to prevent mixing - Add pool type-aware resolution for IP allocation - Add custom deserializer for switch port uplinks with deduplication - Update external API params/views for multicast pool configuration - Add SSM constants (IPV4_SSM_SUBNET, IPV6_SSM_FLAG_FIELD) for validation Database schema updates: - ip_pool table: pool_type, switch_port_uplinks, mvlan columns - Index on pool_type for efficient filtering - Migration preserves existing pools as unicast type by default This provides the foundation for multicast group functionality while maintaining full backward compatibility with existing unicast pools. 
References (for review): - RFD 488: https://rfd.shared.oxide.computer/rfd/488 - Dendrite PRs (based on recency): * https://github.com/oxidecomputer/dendrite/pull/132 * https://github.com/oxidecomputer/dendrite/pull/109 * https://github.com/oxidecomputer/dendrite/pull/14 --- Cargo.lock | 14 +- Cargo.toml | 2 +- common/src/address.rs | 12 + common/src/vlan.rs | 22 +- end-to-end-tests/src/bin/bootstrap.rs | 7 +- end-to-end-tests/src/bin/commtest.rs | 7 +- nexus/db-model/src/generation.rs | 2 + nexus/db-model/src/ip_pool.rs | 119 +++- nexus/db-model/src/schema_versions.rs | 3 +- .../src/db/datastore/external_ip.rs | 41 +- nexus/db-queries/src/db/datastore/ip_pool.rs | 540 ++++++++++++++++-- .../src/db/datastore/switch_port.rs | 40 ++ nexus/db-queries/src/db/on_conflict_ext.rs | 2 +- .../src/db/pub_test_utils/helpers.rs | 89 +++ .../db/queries/external_multicast_group.rs | 281 +++++++++ nexus/db-schema/src/enums.rs | 1 + nexus/db-schema/src/schema.rs | 3 + nexus/src/app/ip_pool.rs | 256 ++++++++- nexus/src/external_api/http_entrypoints.rs | 3 +- nexus/test-utils/src/resource_helpers.rs | 23 +- nexus/tests/integration_tests/endpoints.rs | 16 +- nexus/tests/integration_tests/instances.rs | 2 +- nexus/tests/integration_tests/ip_pools.rs | 537 ++++++++++++++++- nexus/types/src/external_api/deserializers.rs | 112 ++++ nexus/types/src/external_api/mod.rs | 1 + nexus/types/src/external_api/params.rs | 107 ++++ nexus/types/src/external_api/shared.rs | 16 + nexus/types/src/external_api/views.rs | 10 + openapi/nexus.json | 118 ++++ package-manifest.toml | 12 +- schema/crdb/dbinit.sql | 32 +- schema/crdb/multicast-pool-support/up01.sql | 30 + tools/dendrite_stub_checksums | 6 +- tools/dendrite_version | 2 +- 34 files changed, 2295 insertions(+), 173 deletions(-) create mode 100644 nexus/db-queries/src/db/queries/external_multicast_group.rs create mode 100644 nexus/types/src/external_api/deserializers.rs create mode 100644 schema/crdb/multicast-pool-support/up01.sql diff --git 
a/Cargo.lock b/Cargo.lock index a6ec05a8814..308c93e3e3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1699,7 +1699,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de#738c80d18d5e94eda367440ade7743e9d9f124de" +source = "git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7#6ba23e71121c196e1e3c4e0621ba7a6f046237c7" dependencies = [ "anyhow", "chrono", @@ -2894,11 +2894,11 @@ dependencies = [ [[package]] name = "dpd-client" version = "0.1.0" -source = "git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de#738c80d18d5e94eda367440ade7743e9d9f124de" +source = "git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7#6ba23e71121c196e1e3c4e0621ba7a6f046237c7" dependencies = [ "async-trait", "chrono", - "common 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de)", + "common 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7)", "crc8", "futures", "http", @@ -7949,7 +7949,7 @@ dependencies = [ "display-error-chain", "dns-server", "dns-service-client", - "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de)", + "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7)", "dropshot", "ereport-types", "expectorate", @@ -8420,7 +8420,7 @@ dependencies = [ "display-error-chain", "dns-server", "dns-service-client", - "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de)", + "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7)", "dropshot", "expectorate", "flate2", @@ -15503,7 +15503,7 @@ name = "wicket-common" version = "0.1.0" dependencies = [ 
"anyhow", - "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de)", + "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7)", "dropshot", "gateway-client", "gateway-types", @@ -15563,7 +15563,7 @@ dependencies = [ "clap", "debug-ignore", "display-error-chain", - "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=738c80d18d5e94eda367440ade7743e9d9f124de)", + "dpd-client 0.1.0 (git+https://github.com/oxidecomputer/dendrite?rev=6ba23e71121c196e1e3c4e0621ba7a6f046237c7)", "dropshot", "either", "expectorate", diff --git a/Cargo.toml b/Cargo.toml index ddffdcd0042..ec1f7e5a9ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -436,7 +436,7 @@ digest = "0.10.7" dns-server = { path = "dns-server" } dns-server-api = { path = "dns-server-api" } dns-service-client = { path = "clients/dns-service-client" } -dpd-client = { git = "https://github.com/oxidecomputer/dendrite", rev = "738c80d18d5e94eda367440ade7743e9d9f124de" } +dpd-client = { git = "https://github.com/oxidecomputer/dendrite", rev = "6ba23e71121c196e1e3c4e0621ba7a6f046237c7" } dropshot = { version = "0.16.3", features = [ "usdt-probes" ] } dyn-clone = "1.0.20" either = "1.15.0" diff --git a/common/src/address.rs b/common/src/address.rs index 3dfcdfb8d60..84e69c15af1 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -22,6 +22,18 @@ pub const AZ_PREFIX: u8 = 48; pub const RACK_PREFIX: u8 = 56; pub const SLED_PREFIX: u8 = 64; +// Multicast constants + +/// IPv4 Source-Specific Multicast (SSM) subnet as defined in RFC 4607: +/// . +pub const IPV4_SSM_SUBNET: oxnet::Ipv4Net = + oxnet::Ipv4Net::new_unchecked(Ipv4Addr::new(232, 0, 0, 0), 8); + +/// IPv6 Source-Specific Multicast (SSM) flag field value as defined in RFC 4607: +/// . +/// This is the flags nibble (high nibble of second byte) for FF3x::/32 addresses. 
+pub const IPV6_SSM_FLAG_FIELD: u8 = 3; + /// maximum possible value for a tcp or udp port pub const MAX_PORT: u16 = u16::MAX; diff --git a/common/src/vlan.rs b/common/src/vlan.rs index 5e5765ffe20..67c9d4c343e 100644 --- a/common/src/vlan.rs +++ b/common/src/vlan.rs @@ -5,7 +5,9 @@ //! VLAN ID wrapper. use crate::api::external::Error; +use schemars::JsonSchema; use serde::Deserialize; +use serde::Serialize; use std::fmt; use std::str::FromStr; @@ -13,7 +15,8 @@ use std::str::FromStr; pub const VLAN_MAX: u16 = 4094; /// Wrapper around a VLAN ID, ensuring it is valid. -#[derive(Debug, Deserialize, Clone, Copy)] +#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Copy, JsonSchema)] +#[serde(rename = "VlanId")] pub struct VlanID(u16); impl VlanID { @@ -44,3 +47,20 @@ impl FromStr for VlanID { ) } } + +impl From for u16 { + fn from(vlan_id: VlanID) -> u16 { + vlan_id.0 + } +} + +impl slog::Value for VlanID { + fn serialize( + &self, + _record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + serializer.emit_u16(key, self.0) + } +} diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs index 26f7a30dc16..a62d664decd 100644 --- a/end-to-end-tests/src/bin/bootstrap.rs +++ b/end-to-end-tests/src/bin/bootstrap.rs @@ -6,8 +6,8 @@ use end_to_end_tests::helpers::{ use omicron_test_utils::dev::poll::{CondCheckError, wait_for_condition}; use oxide_client::types::{ ByteCount, DeviceAccessTokenRequest, DeviceAuthRequest, DeviceAuthVerify, - DiskCreate, DiskSource, IpPoolCreate, IpPoolLinkSilo, IpVersion, NameOrId, - SiloQuotasUpdate, + DiskCreate, DiskSource, IpPoolCreate, IpPoolLinkSilo, IpPoolType, + IpVersion, NameOrId, SiloQuotasUpdate, }; use oxide_client::{ ClientConsoleAuthExt, ClientDisksExt, ClientProjectsExt, @@ -53,6 +53,9 @@ async fn run_test() -> Result<()> { name: pool_name.parse().unwrap(), description: "Default IP pool".to_string(), ip_version, + mvlan: None, + pool_type: 
IpPoolType::Unicast, + switch_port_uplinks: None, }) .send() .await?; diff --git a/end-to-end-tests/src/bin/commtest.rs b/end-to-end-tests/src/bin/commtest.rs index 1da1cd1c4df..6597d187b9f 100644 --- a/end-to-end-tests/src/bin/commtest.rs +++ b/end-to-end-tests/src/bin/commtest.rs @@ -7,8 +7,8 @@ use oxide_client::{ ClientSystemHardwareExt, ClientSystemIpPoolsExt, ClientSystemStatusExt, ClientVpcsExt, types::{ - IpPoolCreate, IpPoolLinkSilo, IpRange, IpVersion, Name, NameOrId, - PingStatus, ProbeCreate, ProbeInfo, ProjectCreate, + IpPoolCreate, IpPoolLinkSilo, IpPoolType, IpRange, IpVersion, Name, + NameOrId, PingStatus, ProbeCreate, ProbeInfo, ProjectCreate, UsernamePasswordCredentials, }, }; @@ -295,6 +295,9 @@ async fn rack_prepare( name: pool_name.parse().unwrap(), description: "Default IP pool".to_string(), ip_version, + mvlan: None, + pool_type: IpPoolType::Unicast, + switch_port_uplinks: None, }) .send() .await?; diff --git a/nexus/db-model/src/generation.rs b/nexus/db-model/src/generation.rs index 751cb98f3c7..c1b4fba62c5 100644 --- a/nexus/db-model/src/generation.rs +++ b/nexus/db-model/src/generation.rs @@ -8,6 +8,7 @@ use diesel::pg::Pg; use diesel::serialize::{self, ToSql}; use diesel::sql_types; use omicron_common::api::external; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; @@ -23,6 +24,7 @@ use std::convert::TryFrom; FromSqlRow, Serialize, Deserialize, + JsonSchema, )] #[diesel(sql_type = sql_types::BigInt)] #[repr(transparent)] diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 819be85ec8e..4728c97ae3c 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -5,6 +5,7 @@ //! Model types for IP Pools and the CIDR blocks therein. 
use crate::Name; +use crate::SqlU16; use crate::collection::DatastoreCollectionConfig; use crate::impl_enum_type; use chrono::DateTime; @@ -17,10 +18,10 @@ use nexus_db_schema::schema::ip_pool_range; use nexus_db_schema::schema::ip_pool_resource; use nexus_types::external_api::params; use nexus_types::external_api::shared; -use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; +use omicron_common::vlan::VlanID; use std::net::IpAddr; use uuid::Uuid; @@ -72,6 +73,24 @@ impl From for shared::IpVersion { } } +impl From for IpPoolType { + fn from(value: shared::IpPoolType) -> Self { + match value { + shared::IpPoolType::Unicast => Self::Unicast, + shared::IpPoolType::Multicast => Self::Multicast, + } + } +} + +impl From for shared::IpPoolType { + fn from(value: IpPoolType) -> Self { + match value { + IpPoolType::Unicast => Self::Unicast, + IpPoolType::Multicast => Self::Multicast, + } + } +} + /// An IP Pool is a collection of IP addresses external to the rack. /// /// IP pools can be external or internal. External IP pools can be associated @@ -82,16 +101,23 @@ impl From for shared::IpVersion { pub struct IpPool { #[diesel(embed)] pub identity: IpPoolIdentity, - /// The IP version of the pool. pub ip_version: IpVersion, - + /// Pool type for unicast (default) vs multicast pools. + pub pool_type: IpPoolType, + /// Switch port uplinks for multicast pools (array of switch port UUIDs). + /// Only applies to multicast pools, None for unicast pools. + pub switch_port_uplinks: Option>, + /// MVLAN ID for multicast pools. + /// Only applies to multicast pools, None for unicast pools. + pub mvlan: Option, /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. pub rcgen: i64, } impl IpPool { + /// Creates a new unicast (default) IP pool. 
pub fn new( pool_identity: &external::IdentityMetadataCreateParams, ip_version: IpVersion, @@ -102,6 +128,29 @@ impl IpPool { pool_identity.clone(), ), ip_version, + pool_type: IpPoolType::Unicast, + switch_port_uplinks: None, + mvlan: None, + rcgen: 0, + } + } + + /// Creates a new multicast IP pool. + pub fn new_multicast( + pool_identity: &external::IdentityMetadataCreateParams, + ip_version: IpVersion, + switch_port_uplinks: Option>, + mvlan: Option, + ) -> Self { + Self { + identity: IpPoolIdentity::new( + Uuid::new_v4(), + pool_identity.clone(), + ), + ip_version, + pool_type: IpPoolType::Multicast, + switch_port_uplinks, + mvlan: mvlan.map(|vid| u16::from(vid).into()), rcgen: 0, } } @@ -121,24 +170,55 @@ impl IpPool { impl From for views::IpPool { fn from(pool: IpPool) -> Self { - Self { identity: pool.identity(), ip_version: pool.ip_version.into() } + let identity = pool.identity(); + let pool_type = pool.pool_type; + + // Note: UUIDs expected to be converted to "switch.port" format in app + // layer, upon retrieval. + let switch_port_uplinks = match pool.switch_port_uplinks { + Some(uuid_list) => Some( + uuid_list.into_iter().map(|uuid| uuid.to_string()).collect(), + ), + None => None, + }; + + let mvlan = pool.mvlan.map(|vlan| vlan.into()); + + Self { + identity, + pool_type: pool_type.into(), + ip_version: pool.ip_version.into(), + switch_port_uplinks, + mvlan, + } } } -/// A set of updates to an IP Pool +/// A set of updates to an IP Pool. +/// +/// We do not modify the pool type after creation (e.g. unicast -> multicast or +/// vice versa), as that would require a migration of all associated resources. #[derive(AsChangeset)] #[diesel(table_name = ip_pool)] pub struct IpPoolUpdate { pub name: Option, pub description: Option, + /// Switch port uplinks for multicast pools (array of switch port UUIDs), + /// used for multicast traffic outbound from the rack to external networks. + pub switch_port_uplinks: Option>, + /// MVLAN ID for multicast pools. 
+ pub mvlan: Option, pub time_modified: DateTime, } +// Used for unicast updates. impl From for IpPoolUpdate { fn from(params: params::IpPoolUpdate) -> Self { Self { name: params.identity.name.map(|n| n.into()), description: params.identity.description, + switch_port_uplinks: None, // no change + mvlan: None, // no change time_modified: Utc::now(), } } @@ -153,6 +233,25 @@ impl_enum_type!( Silo => b"silo" ); +impl_enum_type!( + IpPoolTypeEnum: + + #[derive(Clone, Copy, Debug, AsExpression, FromSqlRow, PartialEq)] + pub enum IpPoolType; + + Unicast => b"unicast" + Multicast => b"multicast" +); + +impl std::fmt::Display for IpPoolType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + IpPoolType::Unicast => write!(f, "unicast"), + IpPoolType::Multicast => write!(f, "multicast"), + } + } +} + #[derive(Queryable, Insertable, Selectable, Clone, Copy, Debug, PartialEq)] #[diesel(table_name = ip_pool_resource)] pub struct IpPoolResource { @@ -192,7 +291,7 @@ pub struct IpPoolRange { } impl IpPoolRange { - pub fn new(range: &IpRange, ip_pool_id: Uuid) -> Self { + pub fn new(range: &shared::IpRange, ip_pool_id: Uuid) -> Self { let now = Utc::now(); let first_address = range.first_address(); let last_address = range.last_address(); @@ -221,20 +320,20 @@ impl From for views::IpPoolRange { id: range.id, ip_pool_id: range.ip_pool_id, time_created: range.time_created, - range: IpRange::from(&range), + range: shared::IpRange::from(&range), } } } -impl From<&IpPoolRange> for IpRange { +impl From<&IpPoolRange> for shared::IpRange { fn from(range: &IpPoolRange) -> Self { let maybe_range = match (range.first_address.ip(), range.last_address.ip()) { (IpAddr::V4(first), IpAddr::V4(last)) => { - IpRange::try_from((first, last)) + shared::IpRange::try_from((first, last)) } (IpAddr::V6(first), IpAddr::V6(last)) => { - IpRange::try_from((first, last)) + shared::IpRange::try_from((first, last)) } (first, last) => { unreachable!( diff --git 
a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 7b5281a7a60..53f1f0a2335 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. -pub const SCHEMA_VERSION: Version = Version::new(193, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(194, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(194, "multicast-pool-support"), KnownVersion::new(193, "nexus-lockstep-port"), KnownVersion::new(192, "blueprint-source"), KnownVersion::new(191, "debug-log-blueprint-planner"), diff --git a/nexus/db-queries/src/db/datastore/external_ip.rs b/nexus/db-queries/src/db/datastore/external_ip.rs index 4aa91b46664..4ca47aa6df7 100644 --- a/nexus/db-queries/src/db/datastore/external_ip.rs +++ b/nexus/db-queries/src/db/datastore/external_ip.rs @@ -7,7 +7,6 @@ use super::DataStore; use super::SQL_BATCH_SIZE; use crate::authz; -use crate::authz::ApiResource; use crate::context::OpContext; use crate::db::collection_attach::AttachError; use crate::db::collection_attach::DatastoreAttachTarget; @@ -18,6 +17,7 @@ use crate::db::model::FloatingIp; use crate::db::model::IncompleteExternalIp; use crate::db::model::IpKind; use crate::db::model::IpPool; +use crate::db::model::IpPoolType; use crate::db::model::Name; use crate::db::pagination::Paginator; use crate::db::pagination::paginated; @@ -87,7 +87,9 @@ impl DataStore { probe_id: Uuid, pool: Option, ) -> CreateResult { - let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; + let 
authz_pool = self + .resolve_pool_for_allocation(opctx, pool, IpPoolType::Unicast) + .await?; let data = IncompleteExternalIp::for_ephemeral_probe( ip_id, probe_id, @@ -123,7 +125,9 @@ impl DataStore { // Naturally, we now *need* to destroy the ephemeral IP if the newly alloc'd // IP was not attached, including on idempotent success. - let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; + let authz_pool = self + .resolve_pool_for_allocation(opctx, pool, IpPoolType::Unicast) + .await?; let data = IncompleteExternalIp::for_ephemeral(ip_id, authz_pool.id()); // We might not be able to acquire a new IP, but in the event of an @@ -186,33 +190,6 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// If a pool is specified, make sure it's linked to this silo. If a pool is - /// not specified, fetch the default pool for this silo. Once the pool is - /// resolved (by either method) do an auth check. Then return the pool. - async fn resolve_pool_for_allocation( - &self, - opctx: &OpContext, - pool: Option, - ) -> LookupResult { - let authz_pool = match pool { - Some(authz_pool) => { - self.ip_pool_fetch_link(opctx, authz_pool.id()) - .await - .map_err(|_| authz_pool.not_found())?; - - authz_pool - } - // If no pool specified, use the default logic - None => { - let (authz_pool, ..) = - self.ip_pools_fetch_default(opctx).await?; - authz_pool - } - }; - opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; - Ok(authz_pool) - } - /// Allocates a floating IP address for instance usage. 
pub async fn allocate_floating_ip( &self, @@ -224,7 +201,9 @@ impl DataStore { ) -> CreateResult { let ip_id = Uuid::new_v4(); - let authz_pool = self.resolve_pool_for_allocation(opctx, pool).await?; + let authz_pool = self + .resolve_pool_for_allocation(opctx, pool, IpPoolType::Unicast) + .await?; let data = if let Some(ip) = ip { IncompleteExternalIp::for_floating_explicit( diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index f31ae4181fa..6c2c4ca5557 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -7,6 +7,7 @@ use super::DataStore; use super::SQL_BATCH_SIZE; use crate::authz; +use crate::authz::ApiResource; use crate::context::OpContext; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; @@ -19,6 +20,7 @@ use crate::db::model::IpPool; use crate::db::model::IpPoolRange; use crate::db::model::IpPoolResource; use crate::db::model::IpPoolResourceType; +use crate::db::model::IpPoolType; use crate::db::model::IpPoolUpdate; use crate::db::model::Name; use crate::db::pagination::Paginator; @@ -43,6 +45,7 @@ use nexus_db_model::IpVersion; use nexus_db_model::Project; use nexus_db_model::Vpc; use nexus_types::external_api::shared::IpRange; +use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_FLAG_FIELD}; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; @@ -98,18 +101,18 @@ const INTERNAL_SILO_DEFAULT_ERROR: &'static str = "The internal Silo cannot have a default IP Pool"; impl DataStore { - /// List IP Pools - pub async fn ip_pools_list( + async fn ip_pools_list_with_type( &self, opctx: &OpContext, pagparams: &PaginatedBy<'_>, + pool_type: Option, ) -> ListResultVec { use nexus_db_schema::schema::ip_pool; opctx .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) .await?; - match 
pagparams { + let mut q = match pagparams { PaginatedBy::Id(pagparams) => { paginated(ip_pool::table, ip_pool::id, pagparams) } @@ -118,14 +121,56 @@ impl DataStore { ip_pool::name, &pagparams.map_name(|n| Name::ref_cast(n)), ), + }; + + if let Some(pt) = pool_type { + q = q.filter(ip_pool::pool_type.eq(pt)); } - .filter(ip_pool::name.ne(SERVICE_IPV4_POOL_NAME)) - .filter(ip_pool::name.ne(SERVICE_IPV6_POOL_NAME)) - .filter(ip_pool::time_deleted.is_null()) - .select(IpPool::as_select()) - .get_results_async(&*self.pool_connection_authorized(opctx).await?) + + q.filter(ip_pool::name.ne(SERVICE_IPV4_POOL_NAME)) + .filter(ip_pool::name.ne(SERVICE_IPV6_POOL_NAME)) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPool::as_select()) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// List IP Pools + pub async fn ip_pools_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.ip_pools_list_with_type(opctx, pagparams, None).await + } + + /// List Multicast IP Pools + pub async fn ip_pools_list_multicast( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.ip_pools_list_with_type( + opctx, + pagparams, + Some(IpPoolType::Multicast), + ) + .await + } + + /// List Unicast IP Pools + pub async fn ip_pools_list_unicast( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + self.ip_pools_list_with_type( + opctx, + pagparams, + Some(IpPoolType::Unicast), + ) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } /// Look up whether the given pool is available to users in the current @@ -160,14 +205,15 @@ impl DataStore { .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) } - /// Look up the default IP pool for the current silo. If there is no default - /// at silo scope, fall back to the next level up, namely the fleet default. 
- /// There should always be a default pool at the fleet level, though this - /// query can theoretically fail if someone is able to delete that pool or - /// make another one the default and delete that. - pub async fn ip_pools_fetch_default( + /// Look up the default IP pool for the current silo by pool type. + /// + /// Related to `ip_pools_fetch_default`, but this one allows you to specify + /// the pool type (unicast or multicast) to fetch the default pool of that + /// type. + async fn ip_pools_fetch_default_by_type( &self, opctx: &OpContext, + pool_type: IpPoolType, ) -> LookupResult<(authz::IpPool, IpPool)> { use nexus_db_schema::schema::ip_pool; use nexus_db_schema::schema::ip_pool_resource; @@ -183,8 +229,9 @@ impl DataStore { // .authorize(authz::Action::ListChildren, &authz::IP_POOL_LIST) // .await?; - let lookup_type = - LookupType::ByOther("default IP pool for current silo".to_string()); + let lookup_type = LookupType::ByOther(format!( + "default {pool_type} IP pool for current silo" + )); ip_pool::table .inner_join(ip_pool_resource::table) @@ -194,6 +241,8 @@ impl DataStore { .filter(ip_pool_resource::resource_id.eq(authz_silo_id)) .filter(ip_pool_resource::is_default.eq(true)) .filter(ip_pool::time_deleted.is_null()) + // Filter by pool type + .filter(ip_pool::pool_type.eq(pool_type)) // Order by most specific first so we get the most specific. // resource_type is an enum in the DB and therefore gets its order // from the definition; it's not lexicographic. So correctness here @@ -239,8 +288,77 @@ impl DataStore { }) } - /// Look up IP pool intended for internal services by their well-known - /// names. There are separate IP Pools for IPv4 and IPv6 address ranges. + /// Look up the default IP pool for the current silo. If there is no default + /// at silo scope, fall back to the next level up, namely the fleet default. 
+ /// + /// There should always be a default pool at the fleet level, though this + /// query can theoretically fail if someone is able to delete that pool or + /// make another one the default and delete that. + pub async fn ip_pools_fetch_default( + &self, + opctx: &OpContext, + ) -> LookupResult<(authz::IpPool, IpPool)> { + // Default to unicast pools (existing behavior) + self.ip_pools_fetch_default_by_type(opctx, IpPoolType::Unicast).await + } + + /// Pool resolution for allocation by pool type. + /// + /// If pool is provided, validate it's linked to this silo and is of the + /// correct type. If no pool is provided, fetch the default pool of the + /// specified type for this silo. Once the pool is resolved (by either + /// method) do an auth check. Then return the pool. + pub async fn resolve_pool_for_allocation( + &self, + opctx: &OpContext, + pool: Option, + pool_type: IpPoolType, + ) -> LookupResult { + use nexus_db_schema::schema::ip_pool; + + let authz_pool = match pool { + Some(authz_pool) => { + self.ip_pool_fetch_link(opctx, authz_pool.id()) + .await + .map_err(|_| authz_pool.not_found())?; + + let pool_record = { + ip_pool::table + .filter(ip_pool::id.eq(authz_pool.id())) + .filter(ip_pool::time_deleted.is_null()) + .select(IpPool::as_select()) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .map_err(|_| authz_pool.not_found())? + }; + + // Verify it's the correct pool type + if pool_record.pool_type != pool_type { + return Err(Error::invalid_request(&format!( + "Pool '{}' is not a {} pool (type: {})", + pool_record.identity.name, + pool_type, + pool_record.pool_type + ))); + } + + authz_pool + } + // If no pool specified, use the default pool of the specified type + None => { + let (authz_pool, ..) 
= self + .ip_pools_fetch_default_by_type(opctx, pool_type) + .await?; + authz_pool + } + }; + opctx.authorize(authz::Action::CreateChild, &authz_pool).await?; + Ok(authz_pool) + } + + /// Look up IP pool intended for internal services by its well-known name. /// /// This method may require an index by Availability Zone in the future. pub async fn ip_pools_service_lookup( @@ -1193,6 +1311,14 @@ impl DataStore { ))); } + // For multicast pools, validate ASM/SSM separation + if pool.pool_type == IpPoolType::Multicast { + Self::validate_multicast_pool_range_consistency_on_conn( + conn, authz_pool, range, + ) + .await?; + } + let new_range = IpPoolRange::new(range, pool_id); let filter_subquery = FilterOverlappingIpRanges { range: new_range }; let insert_query = @@ -1311,20 +1437,121 @@ impl DataStore { )) } } + + /// Validate that a new range being added to a multicast pool is consistent + /// with existing ranges in the pool, i.e., that we don't mix ASM and SSM + /// ranges in the same pool. + /// + /// Takes in a connection so it can be called from within a + /// transaction context. 
+ async fn validate_multicast_pool_range_consistency_on_conn( + conn: &async_bb8_diesel::Connection, + authz_pool: &authz::IpPool, + range: &IpRange, + ) -> Result<(), Error> { + use nexus_db_schema::schema::ip_pool_range::dsl; + + let new_range_is_ssm = match range { + IpRange::V4(v4_range) => { + let first = v4_range.first_address(); + IPV4_SSM_SUBNET.contains(first) + } + IpRange::V6(v6_range) => { + let first = v6_range.first_address(); + // Check if the flag field (second nibble) is 3 for SSM + let flag_field = (first.octets()[1] & 0xF0) >> 4; + flag_field == IPV6_SSM_FLAG_FIELD + } + }; + + // Query existing ranges within THIS pool only + let existing_ranges: Vec = dsl::ip_pool_range + .filter(dsl::ip_pool_id.eq(authz_pool.id())) + .filter(dsl::time_deleted.is_null()) + .get_results_async(conn) + .await + .map_err(|e| { + Error::internal_error(&format!( + "Failed to fetch existing IP pool ranges: {}", + e + )) + })?; + + // Check if any existing range conflicts with the new range type + for existing_range in &existing_ranges { + let existing_is_ssm = match &existing_range.first_address { + IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()), + IpNetwork::V6(net) => { + // Check if the flag field (second nibble) is 3 for SSM + let flag_field = (net.network().octets()[1] & 0xF0) >> 4; + flag_field == IPV6_SSM_FLAG_FIELD + } + }; + + // If we have a mix of ASM and SSM within this pool, reject + if new_range_is_ssm != existing_is_ssm { + let new_type = if new_range_is_ssm { "SSM" } else { "ASM" }; + let existing_type = if existing_is_ssm { "SSM" } else { "ASM" }; + return Err(Error::invalid_request(&format!( + "Cannot mix {new_type} and {existing_type} ranges in multicast pool. \ + {new_type} ranges (IPv4 232/8, IPv6 FF3x::/32) and \ + {existing_type} ranges (IPv4 224/4, IPv6 FF0x-FF2x::/32) must be in separate pools." + ))); + } + } + + Ok(()) + } + + /// Determine whether a multicast IP pool is SSM (true) or ASM (false). 
+ /// Assumes pools are range-consistent (validated on range insertion). + pub async fn multicast_pool_is_ssm( + &self, + opctx: &OpContext, + pool_id: Uuid, + ) -> LookupResult { + use nexus_db_schema::schema::ip_pool_range::dsl; + + // Fetch any active range for the pool. Validation at insert time + // guarantees consistency across ranges in a multicast pool. + let range = dsl::ip_pool_range + .filter(dsl::ip_pool_id.eq(pool_id)) + .filter(dsl::time_deleted.is_null()) + .select(IpPoolRange::as_select()) + .first_async::( + &*self.pool_connection_authorized(opctx).await?, + ) + .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let Some(range) = range else { + return Err(Error::insufficient_capacity( + "No IP ranges available in multicast pool", + "multicast pool has no active ranges", + )); + }; + + let is_ssm = match range.first_address { + IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()), + IpNetwork::V6(net) => { + // Check if the flag field (second nibble) is 3 for SSM + let flags = (net.network().octets()[1] & 0xF0) >> 4; + flags == IPV6_SSM_FLAG_FIELD + } + }; + + Ok(is_ssm) + } } #[cfg(test)] mod test { + use std::net::{Ipv4Addr, Ipv6Addr}; use std::num::NonZeroU32; - use crate::authz; - use crate::db::datastore::ip_pool::INTERNAL_SILO_DEFAULT_ERROR; - use crate::db::model::{ - IpPool, IpPoolResource, IpPoolResourceType, Project, - }; - use crate::db::pub_test_utils::TestDatabase; use assert_matches::assert_matches; - use nexus_db_model::{IpPoolIdentity, IpVersion}; + use nexus_db_model::{IpPoolIdentity, IpPoolType, IpVersion}; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::address::{IpRange, Ipv4Range, Ipv6Range}; @@ -1335,6 +1562,13 @@ mod test { use omicron_test_utils::dev; use uuid::Uuid; + use crate::authz; + use crate::db::datastore::ip_pool::INTERNAL_SILO_DEFAULT_ERROR; + use crate::db::model::{ + IpPool, IpPoolResource, IpPoolResourceType, 
Project, + }; + use crate::db::pub_test_utils::TestDatabase; + #[tokio::test] async fn test_default_ip_pools() { let logctx = dev::test_setup_log("test_default_ip_pools"); @@ -1391,7 +1625,7 @@ mod test { .expect("Should list silo IP pools"); assert_eq!(silo_pools.len(), 0); - // make default should fail when there is no link yet + // Make default should fail when there is no link yet let authz_pool = authz::IpPool::new( authz::FLEET, pool1_for_silo.id(), @@ -1563,7 +1797,7 @@ mod test { } #[tokio::test] - async fn cannot_set_default_ip_pool_for_internal_silo() { + async fn test_cannot_set_default_ip_pool_for_internal_silo() { let logctx = dev::test_setup_log("cannot_set_default_ip_pool_for_internal_silo"); let db = TestDatabase::new_with_datastore(&logctx.log).await; @@ -1583,6 +1817,9 @@ mod test { ), ip_version, rcgen: 0, + pool_type: IpPoolType::Unicast, + mvlan: None, + switch_port_uplinks: None, }; let pool = datastore .ip_pool_create(&opctx, params) @@ -1690,8 +1927,8 @@ mod test { let range = IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 5), + Ipv4Addr::new(10, 0, 0, 1), + Ipv4Addr::new(10, 0, 0, 5), ) .unwrap(), ); @@ -1805,8 +2042,8 @@ mod test { // Add an IPv6 range let ipv6_range = IpRange::V6( Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 20), + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 0, 10), + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 20), ) .unwrap(), ); @@ -1852,8 +2089,8 @@ mod test { // add a giant range for fun let ipv6_range = IpRange::V6( Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21), - std::net::Ipv6Addr::new( + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21), + Ipv6Addr::new( 0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, ), ) @@ -1875,7 +2112,7 @@ mod test { } #[tokio::test] - async fn cannot_insert_range_in_pool_with_different_ip_version() { + async fn 
test_cannot_insert_range_in_pool_with_different_ip_version() { let logctx = dev::test_setup_log( "cannot_insert_range_in_pool_with_different_ip_version", ); @@ -1887,8 +2124,8 @@ mod test { let ranges = [ IpRange::V6( Ipv6Range::new( - std::net::Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21), - std::net::Ipv6Addr::new( + Ipv6Addr::new(0xfd00, 0, 0, 0, 0, 0, 1, 21), + Ipv6Addr::new( 0xfd00, 0, 0, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, ), ) @@ -1896,8 +2133,8 @@ mod test { ), IpRange::V4( Ipv4Range::new( - std::net::Ipv4Addr::new(10, 0, 0, 1), - std::net::Ipv4Addr::new(10, 0, 0, 5), + Ipv4Addr::new(10, 0, 0, 1), + Ipv4Addr::new(10, 0, 0, 5), ) .unwrap(), ), @@ -1934,4 +2171,229 @@ mod test { db.terminate().await; logctx.cleanup_successful(); } + + #[tokio::test] + async fn test_multicast_ip_pool_basic_operations() { + let logctx = + dev::test_setup_log("test_multicast_ip_pool_basic_operations"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create a multicast IP pool + let identity = IdentityMetadataCreateParams { + name: "multicast-pool".parse().unwrap(), + description: "Test multicast IP pool".to_string(), + }; + let pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast(&identity, IpVersion::V4, None, None), + ) + .await + .expect("Failed to create multicast IP pool"); + + let authz_silo = opctx.authn.silo_required().unwrap(); + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: true, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Failed to link IP pool to silo"); + + // Verify it's marked as multicast + assert_eq!(pool.pool_type, IpPoolType::Multicast); + + // Test multicast-specific listing + let pagparams_id = DataPageParams { + marker: None, + limit: NonZeroU32::new(100).unwrap(), + direction: dropshot::PaginationOrder::Ascending, + }; + let pagbyid = 
PaginatedBy::Id(pagparams_id); + + let multicast_pools = datastore + .ip_pools_list_multicast(&opctx, &pagbyid) + .await + .expect("Should list multicast IP pools"); + assert_eq!(multicast_pools.len(), 1); + assert_eq!(multicast_pools[0].id(), pool.id()); + + // Regular pool listing should also include it + let all_pools = datastore + .ip_pools_list(&opctx, &pagbyid) + .await + .expect("Should list all IP pools"); + assert_eq!(all_pools.len(), 1); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_multicast_ip_pool_default_by_type() { + let logctx = + dev::test_setup_log("test_multicast_ip_pool_default_by_type"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + let authz_silo = opctx.authn.silo_required().unwrap(); + + // Initially no default multicast pool + let error = datastore + .ip_pools_fetch_default_by_type(&opctx, IpPoolType::Multicast) + .await + .unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + // Create and link a multicast pool as default + let identity = IdentityMetadataCreateParams { + name: "default-multicast-pool".parse().unwrap(), + description: "Default multicast pool".to_string(), + }; + let pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast(&identity, IpVersion::V4, None, None), + ) + .await + .expect("Failed to create multicast IP pool"); + + let link = IpPoolResource { + ip_pool_id: pool.id(), + resource_type: IpPoolResourceType::Silo, + resource_id: authz_silo.id(), + is_default: true, + }; + datastore + .ip_pool_link_silo(&opctx, link) + .await + .expect("Could not link multicast pool to silo"); + + // Now should find the default multicast pool + let default_pool = datastore + .ip_pools_fetch_default_by_type(&opctx, IpPoolType::Multicast) + .await + .expect("Should find default multicast pool"); + assert_eq!(default_pool.1.id(), pool.id()); + assert_eq!(default_pool.1.pool_type, IpPoolType::Multicast); + + // Regular default should still fail (no unicast pool) + let error = datastore.ip_pools_fetch_default(&opctx).await.unwrap_err(); + assert_matches!(error, Error::ObjectNotFound { .. 
}); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_multicast_ip_pool_ranges() { + let logctx = dev::test_setup_log("test_multicast_ip_pool_ranges"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + + // Create IPv4 multicast IP pool + let ipv4_identity = IdentityMetadataCreateParams { + name: "multicast-ipv4-pool".parse().unwrap(), + description: "Test IPv4 multicast IP pool".to_string(), + }; + let ipv4_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &ipv4_identity, + IpVersion::V4, + None, + None, + ), + ) + .await + .expect("Failed to create IPv4 multicast IP pool"); + + let authz_ipv4_pool = authz::IpPool::new( + authz::FLEET, + ipv4_pool.id(), + LookupType::ById(ipv4_pool.id()), + ); + + // Add IPv4 multicast range (224.0.0.0/4) + let ipv4_range = IpRange::V4( + Ipv4Range::new( + Ipv4Addr::new(224, 1, 1, 1), + Ipv4Addr::new(224, 1, 1, 10), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range( + &opctx, + &authz_ipv4_pool, + &ipv4_pool, + &ipv4_range, + ) + .await + .expect("Could not add IPv4 multicast range"); + + // Create IPv6 multicast IP pool + let ipv6_identity = IdentityMetadataCreateParams { + name: "multicast-ipv6-pool".parse().unwrap(), + description: "Test IPv6 multicast IP pool".to_string(), + }; + let ipv6_pool = datastore + .ip_pool_create( + &opctx, + IpPool::new_multicast( + &ipv6_identity, + IpVersion::V6, + None, + None, + ), + ) + .await + .expect("Failed to create IPv6 multicast IP pool"); + + let authz_ipv6_pool = authz::IpPool::new( + authz::FLEET, + ipv6_pool.id(), + LookupType::ById(ipv6_pool.id()), + ); + + // Add IPv6 multicast range (ff00::/8) + let ipv6_range = IpRange::V6( + Ipv6Range::new( + Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 1), + Ipv6Addr::new(0xff01, 0, 0, 0, 0, 0, 0, 10), + ) + .unwrap(), + ); + datastore + .ip_pool_add_range( + &opctx, + &authz_ipv6_pool, + &ipv6_pool, + 
&ipv6_range, + ) + .await + .expect("Could not add IPv6 multicast range"); + + // Check IPv4 pool capacity + let ipv4_capacity = datastore + .ip_pool_total_capacity(&opctx, &authz_ipv4_pool) + .await + .unwrap(); + assert_eq!(ipv4_capacity, 10); // 224.1.1.1 to 224.1.1.10 + + // Check IPv6 pool capacity + let ipv6_capacity = datastore + .ip_pool_total_capacity(&opctx, &authz_ipv6_pool) + .await + .unwrap(); + assert_eq!(ipv6_capacity, 10); // ff01::1 to ff01::a + + db.terminate().await; + logctx.cleanup_successful(); + } } diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index c4093806eda..9a756916e6b 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -1138,6 +1138,46 @@ impl DataStore { Ok(id) } + /// Given a list of switch port UUIDs, return a list of strings in the + /// format "<switch_location>.<port_name>". The order of the returned list + /// matches the order of the input UUIDs.
+ pub async fn switch_ports_from_ids( + &self, + opctx: &OpContext, + uplink_uuids: &[Uuid], + ) -> LookupResult> { + use nexus_db_schema::schema::switch_port::{ + self, dsl, port_name, switch_location, + }; + + if uplink_uuids.is_empty() { + return Ok(Vec::new()); + } + + let conn = self.pool_connection_authorized(opctx).await?; + let uplink_uuids_vec: Vec = uplink_uuids.to_vec(); + + // Maintain the order from the input UUIDs + let mut result = Vec::with_capacity(uplink_uuids.len()); + for uuid in uplink_uuids_vec.iter() { + let switch_port_info = dsl::switch_port + .filter(switch_port::id.eq(*uuid)) + .select((switch_location, port_name)) + .first_async::<(String, String)>(&*conn) + .await + .map_err(|_| { + Error::internal_error(&format!( + "Switch port UUID {uuid} not found", + )) + })?; + + result + .push(format!("{}.{}", switch_port_info.0, switch_port_info.1)); + } + + Ok(result) + } + pub async fn switch_ports_with_uplinks( &self, opctx: &OpContext, diff --git a/nexus/db-queries/src/db/on_conflict_ext.rs b/nexus/db-queries/src/db/on_conflict_ext.rs index 5f31eb99fb6..bcf9664b77e 100644 --- a/nexus/db-queries/src/db/on_conflict_ext.rs +++ b/nexus/db-queries/src/db/on_conflict_ext.rs @@ -293,7 +293,7 @@ pub trait IncompleteOnConflictExt { /// [the `filter` method]: /// https://docs.rs/diesel/2.1.4/diesel/query_dsl/methods/trait.FilterDsl.html#tymethod.filter /// [`filter_target` method]: - /// https://docs.rs/diesel/2.1.4/diesel/query_dsl/trait.FilterTarget.html#tymethod.filter_targehttps://docs.rs/diesel/2.1.4/diesel/upsert/trait.DecoratableTarget.html#tymethod.filter_targett + /// https://docs.rs/diesel/2.1.4/diesel/upsert/trait.DecoratableTarget.html#tymethod.filter_target /// [`disallowed_methods`]: /// https://rust-lang.github.io/rust-clippy/master/index.html#disallowed_methods /// [principle of explosion]: diff --git a/nexus/db-queries/src/db/pub_test_utils/helpers.rs b/nexus/db-queries/src/db/pub_test_utils/helpers.rs index aaae77aeb78..f2e2d861be1 
100644 --- a/nexus/db-queries/src/db/pub_test_utils/helpers.rs +++ b/nexus/db-queries/src/db/pub_test_utils/helpers.rs @@ -31,11 +31,13 @@ use nexus_db_model::SledUpdate; use nexus_db_model::Snapshot; use nexus_db_model::SnapshotIdentity; use nexus_db_model::SnapshotState; +use nexus_db_model::Vmm; use nexus_types::external_api::params; use nexus_types::identity::Resource; use omicron_common::api::external; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; +use omicron_uuid_kinds::PropolisUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::VolumeUuid; use std::net::Ipv6Addr; @@ -495,3 +497,90 @@ pub async fn create_project_image( .await .unwrap() } + +/// Create a VMM record for testing. +pub async fn create_vmm_for_instance( + opctx: &OpContext, + datastore: &DataStore, + instance_id: InstanceUuid, + sled_id: SledUuid, +) -> PropolisUuid { + let vmm_id = PropolisUuid::new_v4(); + let vmm = Vmm::new( + vmm_id, + instance_id, + sled_id, + "127.0.0.1".parse().unwrap(), // Test IP + 12400, // Test port + nexus_db_model::VmmCpuPlatform::SledDefault, // Test CPU platform + ); + datastore.vmm_insert(opctx, vmm).await.expect("Should create VMM"); + vmm_id +} + +/// Update instance runtime to point to a VMM. 
+pub async fn attach_instance_to_vmm( + opctx: &OpContext, + datastore: &DataStore, + authz_project: &authz::Project, + instance_id: InstanceUuid, + vmm_id: PropolisUuid, +) { + // Fetch current instance to get generation + let authz_instance = authz::Instance::new( + authz_project.clone(), + instance_id.into_untyped_uuid(), + external::LookupType::ById(instance_id.into_untyped_uuid()), + ); + let instance = datastore + .instance_refetch(opctx, &authz_instance) + .await + .expect("Should fetch instance"); + + datastore + .instance_update_runtime( + &instance_id, + &InstanceRuntimeState { + nexus_state: InstanceState::Vmm, + propolis_id: Some(vmm_id.into_untyped_uuid()), + dst_propolis_id: None, + migration_id: None, + gen: Generation::from(instance.runtime().gen.next()), + time_updated: Utc::now(), + time_last_auto_restarted: None, + }, + ) + .await + .expect("Should update instance runtime state"); +} + +/// Create an instance with an associated VMM (convenience function). +pub async fn create_instance_with_vmm( + opctx: &OpContext, + datastore: &DataStore, + authz_project: &authz::Project, + instance_name: &str, + sled_id: SledUuid, +) -> (InstanceUuid, PropolisUuid) { + let instance_id = create_stopped_instance_record( + opctx, + datastore, + authz_project, + instance_name, + ) + .await; + + let vmm_id = + create_vmm_for_instance(opctx, datastore, instance_id, sled_id).await; + + attach_instance_to_vmm( + opctx, + datastore, + authz_project, + instance_id, + vmm_id, + ) + .await; + + (instance_id, vmm_id) +} diff --git a/nexus/db-queries/src/db/queries/external_multicast_group.rs b/nexus/db-queries/src/db/queries/external_multicast_group.rs new file mode 100644 index 00000000000..79014b00df4 --- /dev/null +++ b/nexus/db-queries/src/db/queries/external_multicast_group.rs @@ -0,0 +1,281 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Implementation of queries for operating on external multicast groups from IP +//! Pools. +//! +//! Much of this is based on the external IP allocation code, with +//! modifications for multicast group semantics. + +use chrono::{DateTime, Utc}; +use diesel::pg::Pg; +use diesel::query_builder::{AstPass, Query, QueryFragment, QueryId}; +use diesel::{Column, QueryResult, RunQueryDsl, sql_types}; +use ipnetwork::IpNetwork; +use uuid::Uuid; + +use nexus_db_lookup::DbConnection; +use nexus_db_schema::schema; + +use crate::db::model::{ + ExternalMulticastGroup, Generation, IncompleteExternalMulticastGroup, + MulticastGroupState, Name, Vni, +}; +use crate::db::true_or_cast_error::matches_sentinel; + +const REALLOCATION_WITH_DIFFERENT_MULTICAST_GROUP_SENTINEL: &'static str = + "Reallocation of multicast group with different configuration"; + +/// Translates a generic multicast group allocation error to an external error. +pub fn from_diesel( + e: diesel::result::Error, +) -> omicron_common::api::external::Error { + let sentinels = [REALLOCATION_WITH_DIFFERENT_MULTICAST_GROUP_SENTINEL]; + if let Some(sentinel) = matches_sentinel(&e, &sentinels) { + match sentinel { + REALLOCATION_WITH_DIFFERENT_MULTICAST_GROUP_SENTINEL => { + return omicron_common::api::external::Error::invalid_request( + "Re-allocating multicast group with different configuration", + ); + } + // Fall-through to the generic error conversion. + _ => {} + } + } + + nexus_db_errors::public_error_from_diesel( + e, + nexus_db_errors::ErrorHandler::Server, + ) +} + +/// Query to allocate the next available external multicast group address from +/// IP pools. +/// +/// This query follows a similar pattern as [`super::external_ip::NextExternalIp`] but for multicast +/// addresses. +/// +/// It handles pool-based allocation, explicit address requests, and +/// idempotency. 
+pub struct NextExternalMulticastGroup { + group: IncompleteExternalMulticastGroup, + now: DateTime, +} + +impl NextExternalMulticastGroup { + pub fn new(group: IncompleteExternalMulticastGroup) -> Self { + let now = Utc::now(); + Self { group, now } + } + + fn push_next_multicast_ip_subquery<'a>( + &'a self, + mut out: AstPass<'_, 'a, Pg>, + ) -> QueryResult<()> { + out.push_sql("SELECT "); + out.push_bind_param::(&self.group.id)?; + out.push_sql(" AS id, "); + + // Use provided name (now required via identity pattern) + out.push_bind_param::(&self.group.name)?; + out.push_sql(" AS name, "); + + // Use provided description (now required via identity pattern) + out.push_bind_param::( + &self.group.description, + )?; + out.push_sql(" AS description, "); + + out.push_bind_param::>( + &self.now, + )?; + out.push_sql(" AS time_created, "); + out.push_bind_param::>( + &self.now, + )?; + out.push_sql(" AS time_modified, "); + + out.push_bind_param::, Option>>(&None)?; + out.push_sql(" AS time_deleted, "); + + out.push_bind_param::(&self.group.project_id)?; + out.push_sql(" AS project_id, "); + + // Pool ID from the candidates subquery (like external IP) + out.push_sql("ip_pool_id, "); + + // Pool range ID from the candidates subquery + out.push_sql("ip_pool_range_id, "); + + // VNI + out.push_bind_param::(&self.group.vni)?; + out.push_sql(" AS vni, "); + + // The multicast IP comes from the candidates subquery + out.push_sql("candidate_ip AS multicast_ip, "); + + // Handle source IPs array + out.push_sql("ARRAY["); + for (i, source_ip) in self.group.source_ips.iter().enumerate() { + if i > 0 { + out.push_sql(", "); + } + out.push_bind_param::( + source_ip, + )?; + } + out.push_sql("]::inet[] AS source_ips, "); + + out.push_bind_param::, Option>(&None)?; + out.push_sql(" AS underlay_group_id, "); + + out.push_bind_param::(&self.group.rack_id)?; + out.push_sql(" AS rack_id, "); + + out.push_bind_param::, Option>(&self.group.tag)?; + out.push_sql(" AS tag, "); + + // New 
multicast groups start in "Creating" state (RPW pattern) + out.push_bind_param::(&MulticastGroupState::Creating)?; + out.push_sql(" AS state, "); + + out.push_sql("nextval('omicron.public.multicast_group_version') AS version_added, "); + out.push_bind_param::, Option>(&None)?; + out.push_sql(" AS version_removed"); + + // FROM the candidates subquery with LEFT JOIN (like external IP) + out.push_sql(" FROM ("); + self.push_address_candidates_subquery(out.reborrow())?; + out.push_sql(") LEFT OUTER JOIN "); + schema::multicast_group::table.walk_ast(out.reborrow())?; + out.push_sql( + " ON (multicast_ip = candidate_ip AND time_deleted IS NULL)", + ); + out.push_sql( + " WHERE candidate_ip IS NOT NULL AND multicast_ip IS NULL LIMIT 1", + ); + + Ok(()) + } + + fn push_address_candidates_subquery<'a>( + &'a self, + mut out: AstPass<'_, 'a, Pg>, + ) -> QueryResult<()> { + use schema::ip_pool_range::dsl; + + out.push_sql("SELECT "); + out.push_identifier(dsl::ip_pool_id::NAME)?; + out.push_sql(", "); + out.push_identifier(dsl::id::NAME)?; + out.push_sql(" AS ip_pool_range_id, "); + + // Handle explicit address vs automatic allocation + if let Some(explicit_addr) = &self.group.explicit_address { + out.push_sql("CASE "); + out.push_identifier(dsl::first_address::NAME)?; + out.push_sql(" <= "); + out.push_bind_param::(explicit_addr)?; + out.push_sql(" AND "); + out.push_bind_param::(explicit_addr)?; + out.push_sql(" <= "); + out.push_identifier(dsl::last_address::NAME)?; + out.push_sql(" WHEN TRUE THEN "); + out.push_bind_param::(explicit_addr)?; + out.push_sql(" ELSE NULL END"); + } else { + // Generate series of candidate IPs (like external IP does) + out.push_identifier(dsl::first_address::NAME)?; + out.push_sql(" + generate_series(0, "); + out.push_identifier(dsl::last_address::NAME)?; + out.push_sql(" - "); + out.push_identifier(dsl::first_address::NAME)?; + out.push_sql(")"); + } + + out.push_sql(" AS candidate_ip FROM "); + 
schema::ip_pool_range::table.walk_ast(out.reborrow())?; + out.push_sql(" WHERE "); + out.push_identifier(dsl::ip_pool_id::NAME)?; + out.push_sql(" = "); + out.push_bind_param::(&self.group.ip_pool_id)?; + out.push_sql(" AND "); + out.push_identifier(dsl::time_deleted::NAME)?; + out.push_sql(" IS NULL"); + + // Filter for multicast address ranges (224.0.0.0/4 for IPv4, + // ff00::/8 for IPv6) + out.push_sql(" AND ("); + out.push_identifier(dsl::first_address::NAME)?; + out.push_sql(" << '224.0.0.0/4'::inet OR "); + out.push_identifier(dsl::first_address::NAME)?; + out.push_sql(" << 'ff00::/8'::inet)"); + + Ok(()) + } + + fn push_prior_allocation_subquery<'a>( + &'a self, + mut out: AstPass<'_, 'a, Pg>, + ) -> QueryResult<()> { + out.push_sql("SELECT * FROM "); + schema::multicast_group::table.walk_ast(out.reborrow())?; + out.push_sql(" WHERE id = "); + out.push_bind_param::(&self.group.id)?; + out.push_sql(" AND time_deleted IS NULL"); + Ok(()) + } +} + +impl QueryFragment for NextExternalMulticastGroup { + fn walk_ast<'a>( + &'a self, + mut out: AstPass<'_, 'a, Pg>, + ) -> diesel::QueryResult<()> { + out.unsafe_to_cache_prepared(); + + // Create CTE for candidate multicast group + out.push_sql("WITH next_external_multicast_group AS ("); + self.push_next_multicast_ip_subquery(out.reborrow())?; + out.push_sql("), "); + + // Check for existing allocation (idempotency) + out.push_sql("previously_allocated_group AS ("); + self.push_prior_allocation_subquery(out.reborrow())?; + out.push_sql("), "); + + // Insert new record or return existing one + out.push_sql("multicast_group AS ("); + out.push_sql("INSERT INTO "); + schema::multicast_group::table.walk_ast(out.reborrow())?; + out.push_sql( + " (id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed) + SELECT id, name, description, time_created, time_modified, 
time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed FROM next_external_multicast_group + WHERE NOT EXISTS (SELECT 1 FROM previously_allocated_group) + RETURNING id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed", + ); + out.push_sql(") "); + + // Return either the newly inserted or previously allocated group + out.push_sql( + "SELECT id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed FROM previously_allocated_group + UNION ALL + SELECT id, name, description, time_created, time_modified, time_deleted, project_id, ip_pool_id, ip_pool_range_id, vni, multicast_ip, source_ips, underlay_group_id, rack_id, tag, state, version_added, version_removed FROM multicast_group", + ); + + Ok(()) + } +} + +impl QueryId for NextExternalMulticastGroup { + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} + +impl Query for NextExternalMulticastGroup { + type SqlType = <>::SelectExpression as diesel::Expression>::SqlType; +} + +impl RunQueryDsl for NextExternalMulticastGroup {} diff --git a/nexus/db-schema/src/enums.rs b/nexus/db-schema/src/enums.rs index 5ea98088306..bb10279a629 100644 --- a/nexus/db-schema/src/enums.rs +++ b/nexus/db-schema/src/enums.rs @@ -58,6 +58,7 @@ define_enums! 
{ IpAttachStateEnum => "ip_attach_state", IpKindEnum => "ip_kind", IpPoolResourceTypeEnum => "ip_pool_resource_type", + IpPoolTypeEnum => "ip_pool_type", IpVersionEnum => "ip_version", MigrationStateEnum => "migration_state", NetworkInterfaceKindEnum => "network_interface_kind", diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 777c4cb2dbb..1fcd15679f8 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -629,6 +629,9 @@ table! { time_modified -> Timestamptz, time_deleted -> Nullable, ip_version -> crate::enums::IpVersionEnum, + pool_type -> crate::enums::IpPoolTypeEnum, + switch_port_uplinks -> Nullable>, + mvlan -> Nullable, rcgen -> Int8, } } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index f51bc8a5541..3b40b6938ac 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -5,10 +5,15 @@ //! IP Pools, collections of external IP addresses for guest instances use crate::external_api::params; -use crate::external_api::shared::IpRange; +use crate::external_api::shared; +use crate::external_api::views; +use chrono::Utc; use ipnetwork::IpNetwork; use nexus_db_lookup::LookupPath; use nexus_db_lookup::lookup; +use nexus_db_model::IpPool; +use nexus_db_model::IpPoolType; +use nexus_db_model::IpPoolUpdate; use nexus_db_model::IpVersion; use nexus_db_queries::authz; use nexus_db_queries::authz::ApiResource; @@ -71,15 +76,45 @@ impl super::Nexus { &self, opctx: &OpContext, pool_params: ¶ms::IpPoolCreate, - ) -> CreateResult { - // https://github.com/oxidecomputer/omicron/issues/8966 + ) -> CreateResult { + // https://github.com/oxidecomputer/omicron/issues/8881 let ip_version = pool_params.ip_version.into(); - if matches!(ip_version, IpVersion::V6) { - return Err(Error::invalid_request( - "IPv6 pools are not yet supported", - )); - } - let pool = db::model::IpPool::new(&pool_params.identity, ip_version); + + let pool = match ( + pool_params.pool_type.clone(), + 
pool_params.switch_port_uplinks.is_some(), + ) { + (shared::IpPoolType::Unicast, true) => { + return Err(Error::invalid_request( + "switch_port_uplinks are only allowed for multicast IP pools", + )); + } + (shared::IpPoolType::Unicast, false) => { + if pool_params.mvlan.is_some() { + return Err(Error::invalid_request( + "mvlan is only allowed for multicast IP pools", + )); + } + IpPool::new(&pool_params.identity, ip_version) + } + (shared::IpPoolType::Multicast, _) => { + let switch_port_ids = self + .resolve_switch_port_ids( + opctx, + self.rack_id(), + &pool_params.switch_port_uplinks, + ) + .await?; + + IpPool::new_multicast( + &pool_params.identity, + ip_version, + switch_port_ids, + pool_params.mvlan, + ) + } + }; + self.db_datastore.ip_pool_create(opctx, pool).await } @@ -281,9 +316,23 @@ impl super::Nexus { return Err(not_found_from_lookup(pool_lookup)); } - self.db_datastore - .ip_pool_update(opctx, &authz_pool, updates.clone().into()) - .await + let switch_port_ids = self + .resolve_switch_port_ids( + opctx, + self.rack_id(), + &updates.switch_port_uplinks, + ) + .await?; + + let updates_db = IpPoolUpdate { + name: updates.identity.name.clone().map(Into::into), + description: updates.identity.description.clone(), + switch_port_uplinks: switch_port_ids, + mvlan: updates.mvlan.map(|vid| u16::from(vid).into()), + time_modified: Utc::now(), + }; + + self.db_datastore.ip_pool_update(opctx, &authz_pool, updates_db).await } pub(crate) async fn ip_pool_list_ranges( @@ -308,7 +357,7 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - range: &IpRange, + range: &shared::IpRange, ) -> UpdateResult { let (.., authz_pool, db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; @@ -326,12 +375,45 @@ impl super::Nexus { // pool utilization. // // See https://github.com/oxidecomputer/omicron/issues/8761. 
- if matches!(range, IpRange::V6(_)) { + if matches!(range, shared::IpRange::V6(_)) { return Err(Error::invalid_request( "IPv6 ranges are not allowed yet", )); } + let range_is_multicast = match range { + shared::IpRange::V4(v4_range) => { + let first = v4_range.first_address(); + let last = v4_range.last_address(); + first.is_multicast() && last.is_multicast() + } + shared::IpRange::V6(v6_range) => { + let first = v6_range.first_address(); + let last = v6_range.last_address(); + first.is_multicast() && last.is_multicast() + } + }; + + match db_pool.pool_type { + IpPoolType::Multicast => { + if !range_is_multicast { + return Err(Error::invalid_request( + "Cannot add unicast address range to multicast IP pool", + )); + } + + // For multicast pools, validate ASM/SSM separation + // This validation is done in the datastore layer + } + IpPoolType::Unicast => { + if range_is_multicast { + return Err(Error::invalid_request( + "Cannot add multicast address range to unicast IP pool", + )); + } + } + } + self.db_datastore .ip_pool_add_range(opctx, &authz_pool, &db_pool, range) .await @@ -341,7 +423,7 @@ impl super::Nexus { &self, opctx: &OpContext, pool_lookup: &lookup::IpPool<'_>, - range: &IpRange, + range: &shared::IpRange, ) -> DeleteResult { let (.., authz_pool, _db_pool) = pool_lookup.fetch_for(authz::Action::Modify).await?; @@ -391,8 +473,14 @@ impl super::Nexus { pub(crate) async fn ip_pool_service_add_range( &self, opctx: &OpContext, - range: &IpRange, + range: &shared::IpRange, ) -> UpdateResult { + let (authz_pool, db_pool) = self + .db_datastore + .ip_pools_service_lookup(opctx, range.version().into()) + .await?; + opctx.authorize(authz::Action::Modify, &authz_pool).await?; + // Disallow V6 ranges until IPv6 is fully supported by the networking // subsystem. Instead of changing the API to reflect that (making this // endpoint inconsistent with the rest) and changing it back when we @@ -402,16 +490,43 @@ impl super::Nexus { // pool utilization. 
// // See https://github.com/oxidecomputer/omicron/issues/8761. - if matches!(range, IpRange::V6(_)) { + if matches!(range, shared::IpRange::V6(_)) { return Err(Error::invalid_request( "IPv6 ranges are not allowed yet", )); } - let (authz_pool, db_pool) = self - .db_datastore - .ip_pools_service_lookup(opctx, range.version().into()) - .await?; - opctx.authorize(authz::Action::Modify, &authz_pool).await?; + + // Validate that the range matches the pool type + let range_is_multicast = match range { + shared::IpRange::V4(v4_range) => { + let first = v4_range.first_address(); + let last = v4_range.last_address(); + first.is_multicast() && last.is_multicast() + } + shared::IpRange::V6(v6_range) => { + let first = v6_range.first_address(); + let last = v6_range.last_address(); + first.is_multicast() && last.is_multicast() + } + }; + + match db_pool.pool_type { + IpPoolType::Multicast => { + if !range_is_multicast { + return Err(Error::invalid_request( + "Cannot add unicast address range to multicast IP pool", + )); + } + } + IpPoolType::Unicast => { + if range_is_multicast { + return Err(Error::invalid_request( + "Cannot add multicast address range to unicast IP pool", + )); + } + } + } + self.db_datastore .ip_pool_add_range(opctx, &authz_pool, &db_pool, range) .await @@ -420,7 +535,7 @@ impl super::Nexus { pub(crate) async fn ip_pool_service_delete_range( &self, opctx: &OpContext, - range: &IpRange, + range: &shared::IpRange, ) -> DeleteResult { let (authz_pool, ..) 
= self
            .db_datastore
@@ -429,4 +544,99 @@ impl super::Nexus {
         opctx.authorize(authz::Action::Modify, &authz_pool).await?;
         self.db_datastore.ip_pool_delete_range(opctx, &authz_pool, range).await
     }
+
+    async fn resolve_switch_port_ids(
+        &self,
+        opctx: &OpContext,
+        rack_id: Uuid,
+        uplinks: &Option<Vec<SwitchPortUplink>>,
+    ) -> Result<Option<Vec<Uuid>>, Error> {
+        match uplinks {
+            None => Ok(None),
+            Some(list) => {
+                let mut ids = Vec::with_capacity(list.len());
+
+                for uplink in list {
+                    let switch_location =
+                        Name::from(uplink.switch_location.clone());
+                    let port_name = Name::from(uplink.port_name.clone());
+                    let id = self
+                        .db_datastore
+                        .switch_port_get_id(
+                            opctx,
+                            rack_id,
+                            switch_location,
+                            port_name,
+                        )
+                        .await
+                        .map_err(|_| {
+                            Error::invalid_value(
+                                "switch_port_uplinks",
+                                format!("Switch port '{}' not found", uplink),
+                            )
+                        })?;
+                    ids.push(id);
+                }
+                Ok(Some(ids))
+            }
+        }
+    }
+
+    /// Convert IP pool with proper switch port name resolution in an async
+    /// context.
+    pub(crate) async fn ip_pool_to_view(
+        &self,
+        opctx: &OpContext,
+        pool: db::model::IpPool,
+    ) -> Result<views::IpPool, Error> {
+        let identity = pool.identity();
+        let pool_type = pool.pool_type;
+
+        // Convert switch port UUIDs to "switch.port" format
+        let switch_port_uplinks = self
+            .resolve_switch_port_names(opctx, &pool.switch_port_uplinks)
+            .await?;
+
+        let mvlan = pool.mvlan.map(|vlan| vlan.into());
+
+        Ok(views::IpPool {
+            identity,
+            ip_version: pool.ip_version.into(),
+            pool_type: pool_type.into(),
+            switch_port_uplinks,
+            mvlan,
+        })
+    }
+
+    // Convert switch port UUIDs to "switch.port" format for views
+    async fn resolve_switch_port_names(
+        &self,
+        opctx: &OpContext,
+        switch_port_ids: &Option<Vec<Uuid>>,
+    ) -> Result<Option<Vec<String>>, Error> {
+        match switch_port_ids {
+            None => Ok(None),
+            Some(ids) => {
+                let mut names = Vec::with_capacity(ids.len());
+                for &id in ids {
+                    let switch_port = self
+                        .db_datastore
+                        .switch_port_get(opctx, id)
+                        .await
+                        .map_err(|_| {
+                            Error::internal_error(&format!(
+                                "Switch port with ID {} not found",
id + )) + })?; + let name = format!( + "{}.{}", + switch_port.switch_location, switch_port.port_name + ); + names.push(name); + } + Ok(Some(names)) + } + } + } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 42e094ccc7b..7a9e207ce76 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1193,7 +1193,8 @@ impl NexusExternalApi for NexusExternalApiImpl { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; - Ok(HttpResponseCreated(IpPool::from(pool))) + let pool_view = nexus.ip_pool_to_view(&opctx, pool).await?; + Ok(HttpResponseCreated(pool_view)) }; apictx .context diff --git a/nexus/test-utils/src/resource_helpers.rs b/nexus/test-utils/src/resource_helpers.rs index 71871e932c8..5f9f59b039f 100644 --- a/nexus/test-utils/src/resource_helpers.rs +++ b/nexus/test-utils/src/resource_helpers.rs @@ -250,20 +250,17 @@ pub async fn create_ip_pool( pool_name: &str, ip_range: Option, ) -> (IpPool, IpPoolRange) { - let pool = object_create( - client, - "/v1/system/ip-pools", - ¶ms::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: pool_name.parse().unwrap(), - description: String::from("an ip pool"), - }, - ip_version: ip_range - .map(|r| r.version()) - .unwrap_or_else(views::IpVersion::v4), + let pool_params = params::IpPoolCreate::new( + IdentityMetadataCreateParams { + name: pool_name.parse().unwrap(), + description: String::from("an ip pool"), }, - ) - .await; + ip_range + .as_ref() + .map(|r| r.version()) + .unwrap_or_else(views::IpVersion::v4), + ); + let pool = object_create(client, "/v1/system/ip-pools", &pool_params).await; let ip_range = ip_range.unwrap_or_else(|| { use std::net::Ipv4Addr; diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index d8c62a95d94..3e8f4b503fd 100644 --- 
a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -928,12 +928,14 @@ pub const DEMO_IP_POOLS_URL: &'static str = "/v1/system/ip-pools"; pub static DEMO_IP_POOL_NAME: LazyLock = LazyLock::new(|| "default".parse().unwrap()); pub static DEMO_IP_POOL_CREATE: LazyLock = - LazyLock::new(|| params::IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: DEMO_IP_POOL_NAME.clone(), - description: String::from("an IP pool"), - }, - ip_version: IpVersion::V4, + LazyLock::new(|| { + params::IpPoolCreate::new( + IdentityMetadataCreateParams { + name: DEMO_IP_POOL_NAME.clone(), + description: String::from("an IP pool"), + }, + IpVersion::V4, + ) }); pub static DEMO_IP_POOL_PROJ_URL: LazyLock = LazyLock::new(|| { format!( @@ -951,6 +953,8 @@ pub static DEMO_IP_POOL_UPDATE: LazyLock = name: None, description: Some(String::from("a new IP pool")), }, + mvlan: None, + switch_port_uplinks: None, }); pub static DEMO_IP_POOL_SILOS_URL: LazyLock = LazyLock::new(|| format!("{}/silos", *DEMO_IP_POOL_URL)); diff --git a/nexus/tests/integration_tests/instances.rs b/nexus/tests/integration_tests/instances.rs index 65e5ede0a70..d0061f4c3a2 100644 --- a/nexus/tests/integration_tests/instances.rs +++ b/nexus/tests/integration_tests/instances.rs @@ -7163,7 +7163,7 @@ async fn test_instance_ephemeral_ip_no_default_pool_error( let url = format!("/v1/instances?project={}", PROJECT_NAME); let error = object_create_error(client, &url, &body, StatusCode::NOT_FOUND).await; - let msg = "not found: default IP pool for current silo".to_string(); + let msg = "not found: default unicast IP pool for current silo".to_string(); assert_eq!(error.message, msg); // same deal if you specify a pool that doesn't exist diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index fa6fa2839ec..fe86208bac8 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -43,6 +43,7 
@@ use nexus_types::external_api::params::IpPoolCreate; use nexus_types::external_api::params::IpPoolLinkSilo; use nexus_types::external_api::params::IpPoolSiloUpdate; use nexus_types::external_api::params::IpPoolUpdate; +use nexus_types::external_api::shared::IpPoolType; use nexus_types::external_api::shared::IpRange; use nexus_types::external_api::shared::Ipv4Range; use nexus_types::external_api::shared::SiloIdentityMode; @@ -61,6 +62,7 @@ use omicron_common::api::external::InstanceState; use omicron_common::api::external::NameOrId; use omicron_common::api::external::SimpleIdentityOrName; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; +use omicron_common::vlan::VlanID; use omicron_nexus::TestInterfaces; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; @@ -101,13 +103,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { // Create the pool, verify we can get it back by either listing or fetching // directly - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: pool_name.parse().unwrap(), + let params = IpPoolCreate::new( + IdentityMetadataCreateParams { + name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - ip_version: IpVersion::V4, - }; + IpVersion::V4, + ); let created_pool: IpPool = object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); @@ -125,13 +127,13 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { let error = object_create_error( client, ip_pools_url, - ¶ms::IpPoolCreate { - identity: IdentityMetadataCreateParams { + ¶ms::IpPoolCreate::new( + IdentityMetadataCreateParams { name: pool_name.parse().unwrap(), description: String::new(), }, - ip_version: IpVersion::V4, - }, + IpVersion::V4, + ), StatusCode::BAD_REQUEST, ) .await; @@ -175,6 +177,8 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: 
Some(String::from(new_pool_name).parse().unwrap()), description: None, }, + mvlan: None, + switch_port_uplinks: None, }; let modified_pool: IpPool = object_put(client, &ip_pool_url, &updates).await; @@ -382,6 +386,8 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { name: Some("test".parse().unwrap()), description: Some("test".to_string()), }, + mvlan: None, + switch_port_uplinks: None, }; let error = object_put_error( client, @@ -821,13 +827,13 @@ async fn create_pool( name: &str, ip_version: IpVersion, ) -> IpPool { - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { + let params = IpPoolCreate::new( + IdentityMetadataCreateParams { name: Name::try_from(name.to_string()).unwrap(), description: "".to_string(), }, ip_version, - }; + ); NexusRequest::objects_post(client, "/v1/system/ip-pools", ¶ms) .authn_as(AuthnMode::PrivilegedUser) .execute() @@ -948,13 +954,14 @@ async fn test_ip_pool_range_overlapping_ranges_fails( let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); // Create the pool, verify basic properties - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: pool_name.parse().unwrap(), + let params = IpPoolCreate::new( + IdentityMetadataCreateParams { + name: String::from(pool_name).parse().unwrap(), description: String::from(description), }, - ip_version: IpVersion::V4, - }; + IpVersion::V4, + ); + let created_pool: IpPool = object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); @@ -1107,13 +1114,13 @@ async fn test_ip_pool_range_pagination(cptestctx: &ControlPlaneTestContext) { let ip_pool_add_range_url = format!("{}/add", ip_pool_ranges_url); // Create the pool, verify basic properties - let params = IpPoolCreate { - identity: IdentityMetadataCreateParams { - name: pool_name.parse().unwrap(), + let params = IpPoolCreate::new( + IdentityMetadataCreateParams { + name: String::from(pool_name).parse().unwrap(), description: 
String::from(description), }, - ip_version: IpVersion::V4, - }; + IpVersion::V4, + ); let created_pool: IpPool = object_create(client, ip_pools_url, ¶ms).await; assert_eq!(created_pool.identity.name, pool_name); @@ -1523,3 +1530,489 @@ fn assert_ranges_eq(first: &IpPoolRange, second: &IpPoolRange) { assert_eq!(first.range.first_address(), second.range.first_address()); assert_eq!(first.range.last_address(), second.range.last_address()); } + +fn assert_unicast_defaults(pool: &IpPool) { + assert_eq!(pool.pool_type, IpPoolType::Unicast); + assert!(pool.mvlan.is_none()); + assert!(pool.switch_port_uplinks.is_none()); +} + +#[nexus_test] +async fn test_ip_pool_unicast_defaults(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + // Test that regular IP pool creation uses unicast defaults + let pool = create_pool(client, "unicast-test", IpVersion::V4).await; + assert_unicast_defaults(&pool); + + // Test that explicitly creating with default type still works + let params = IpPoolCreate::new( + IdentityMetadataCreateParams { + name: "explicit-unicast".parse().unwrap(), + description: "Explicit unicast pool".to_string(), + }, + IpVersion::V4, + ); + let pool: IpPool = + object_create(client, "/v1/system/ip-pools", ¶ms).await; + assert_unicast_defaults(&pool); +} + +#[nexus_test] +async fn test_ip_pool_multicast_crud(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + // Create multicast IP pool + let params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "multicast-test".parse().unwrap(), + description: "Test multicast pool".to_string(), + }, + IpVersion::V4, + Some(vec!["switch0.qsfp0".parse().unwrap()]), + VlanID::new(100).ok(), + ); + + let pool: IpPool = + object_create(client, "/v1/system/ip-pools", ¶ms).await; + assert_eq!(pool.pool_type, IpPoolType::Multicast); + assert_eq!(pool.mvlan, Some(100u16)); + assert!(pool.switch_port_uplinks.is_some()); + let uplinks = 
pool.switch_port_uplinks.as_ref().unwrap(); + assert_eq!(uplinks.len(), 1); + // Verify view shows "switch.port" format + assert_eq!(uplinks[0], "switch0.qsfp0"); + + // Test update - change VLAN and remove uplinks + let updates = IpPoolUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("Updated multicast pool".to_string()), + }, + mvlan: VlanID::new(200).ok(), + switch_port_uplinks: Some(vec![]), // Remove all uplinks + }; + + let pool_url = "/v1/system/ip-pools/multicast-test"; + let updated_pool: IpPool = object_put(client, pool_url, &updates).await; + assert_eq!(updated_pool.mvlan, Some(200u16)); + let uplinks = updated_pool.switch_port_uplinks.as_ref().unwrap(); + assert_eq!(uplinks.len(), 0); // All uplinks removed + + // Note: Field clearing semantics would need to be tested separately + // as the update API uses None to mean "no change", not "clear field" +} + +#[nexus_test] +async fn test_ip_pool_multicast_ranges(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + // Create IPv4 multicast pool + let params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "multicast-ipv4".parse().unwrap(), + description: "IPv4 multicast pool".to_string(), + }, + IpVersion::V4, + None, + None, + ); + + let _pool: IpPool = + object_create(client, "/v1/system/ip-pools", ¶ms).await; + let pool_url = "/v1/system/ip-pools/multicast-ipv4"; + let ranges_url = format!("{}/ranges/add", pool_url); + + // Add IPv4 multicast range (224.0.0.0/4) + let ipv4_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 1, 1, 1), + std::net::Ipv4Addr::new(224, 1, 1, 10), + ) + .unwrap(), + ); + + let created_range: IpPoolRange = + object_create(client, &ranges_url, &ipv4_range).await; + assert_eq!(ipv4_range.first_address(), created_range.range.first_address()); + assert_eq!(ipv4_range.last_address(), created_range.range.last_address()); + + // Verify utilization + 
assert_ip_pool_utilization(client, "multicast-ipv4", 0, 10.0).await; +} + +#[nexus_test] +async fn test_ip_pool_multicast_silo_linking( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create multicast pool + let params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "multicast-silo-test".parse().unwrap(), + description: "Multicast pool for silo linking".to_string(), + }, + IpVersion::V4, + None, + VlanID::new(300).ok(), + ); + + let _pool: IpPool = + object_create(client, "/v1/system/ip-pools", ¶ms).await; + + // Create silo to link with + let silo = + create_silo(&client, "multicast-silo", true, SiloIdentityMode::SamlJit) + .await; + + // Link multicast pool to silo + link_ip_pool(client, "multicast-silo-test", &silo.id(), true).await; + + // Verify the link shows up correctly + let silo_pools = pools_for_silo(client, "multicast-silo").await; + assert_eq!(silo_pools.len(), 1); + assert_eq!(silo_pools[0].identity.name, "multicast-silo-test"); + // Note: SiloIpPool doesn't expose pool_type, would need separate lookup + assert!(silo_pools[0].is_default); + + // Verify pool shows linked silo + let linked_silos = silos_for_pool(client, "multicast-silo-test").await; + assert_eq!(linked_silos.items.len(), 1); + assert_eq!(linked_silos.items[0].silo_id, silo.id()); + assert!(linked_silos.items[0].is_default); +} + +#[nexus_test] +async fn test_ip_pool_mixed_unicast_multicast( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create one of each type + let unicast_pool = create_pool(client, "unicast", IpVersion::V4).await; + assert_unicast_defaults(&unicast_pool); + + let multicast_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "multicast".parse().unwrap(), + description: "Multicast pool".to_string(), + }, + IpVersion::V4, + Some(vec!["switch0.qsfp0".parse().unwrap()]), + VlanID::new(400).ok(), + ); + + let multicast_pool: IpPool 
= + object_create(client, "/v1/system/ip-pools", &multicast_params).await; + assert_eq!(multicast_pool.pool_type, IpPoolType::Multicast); + + // List all pools - should see both types + let all_pools = get_ip_pools(client).await; + assert_eq!(all_pools.len(), 2); + + // Verify each has correct type + for pool in all_pools { + match pool.identity.name.as_str() { + "unicast" => assert_unicast_defaults(&pool), + "multicast" => assert_eq!(pool.pool_type, IpPoolType::Multicast), + _ => panic!("Unexpected pool name: {}", pool.identity.name), + } + } +} + +#[nexus_test] +async fn test_ip_pool_unicast_rejects_multicast_fields( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Try to create unicast pool with multicast-only fields - should be rejected + let mut params = IpPoolCreate::new( + IdentityMetadataCreateParams { + name: "invalid-unicast".parse().unwrap(), + description: "Unicast pool with invalid multicast fields" + .to_string(), + }, + IpVersion::V4, + ); + params.mvlan = VlanID::new(100).ok(); // This should be rejected for unicast + + let error = object_create_error( + client, + "/v1/system/ip-pools", + ¶ms, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("mvlan") + || error.message.contains("VLAN") + || error.message.contains("unicast") + ); + + // Try to create unicast pool with uplinks - should be rejected + let mut params = IpPoolCreate::new( + IdentityMetadataCreateParams { + name: "invalid-unicast2".parse().unwrap(), + description: "Unicast pool with uplinks".to_string(), + }, + IpVersion::V4, + ); + params.switch_port_uplinks = Some(vec!["switch0.qsfp0".parse().unwrap()]); + + let error = object_create_error( + client, + "/v1/system/ip-pools", + ¶ms, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("uplink") + || error.message.contains("switch") + || error.message.contains("unicast") + ); + + // Both fields together should also fail + let mut params = 
IpPoolCreate::new( + IdentityMetadataCreateParams { + name: "invalid-unicast3".parse().unwrap(), + description: "Unicast pool with both invalid fields".to_string(), + }, + IpVersion::V4, + ); + params.mvlan = VlanID::new(200).ok(); + params.switch_port_uplinks = Some(vec!["switch0.qsfp0".parse().unwrap()]); + + let error = object_create_error( + client, + "/v1/system/ip-pools", + ¶ms, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("unicast") + || error.message.contains("mvlan") + || error.message.contains("uplink") + ); +} + +#[nexus_test] +async fn test_ip_pool_multicast_invalid_vlan( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Test valid VLAN range first (to ensure we understand the API) + let valid_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "valid-vlan".parse().unwrap(), + description: "Multicast pool with valid VLAN".to_string(), + }, + IpVersion::V4, + None, + VlanID::new(100).ok(), + ); + + // This should succeed + let _pool: IpPool = + object_create(client, "/v1/system/ip-pools", &valid_params).await; + + // Now test edge cases - VLAN 4094 should be valid (at the boundary) + let boundary_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "boundary-vlan".parse().unwrap(), + description: "Multicast pool with boundary VLAN".to_string(), + }, + IpVersion::V4, + None, + VlanID::new(4094).ok(), + ); + + let _pool: IpPool = + object_create(client, "/v1/system/ip-pools", &boundary_params).await; +} + +#[nexus_test] +async fn test_ip_pool_multicast_invalid_uplinks( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Test with empty uplinks list + let params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "empty-uplinks".parse().unwrap(), + description: "Multicast pool with empty uplinks".to_string(), + }, + IpVersion::V4, + Some(vec![]), + 
VlanID::new(100).ok(), + ); + + // Empty list should be fine - just means no specific uplinks configured + let _pool: IpPool = + object_create(client, "/v1/system/ip-pools", ¶ms).await; + + // Test with duplicate uplinks + let params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "duplicate-uplinks".parse().unwrap(), + description: "Multicast pool with duplicate uplinks".to_string(), + }, + IpVersion::V4, + Some(vec![ + "switch0.qsfp0".parse().unwrap(), + "switch0.qsfp0".parse().unwrap(), // Duplicate - should be automatically removed + ]), + VlanID::new(200).ok(), + ); + + // Duplicates should be automatically removed by the deserializer + let _pool: IpPool = + object_create(client, "/v1/system/ip-pools", ¶ms).await; + let uplinks = _pool.switch_port_uplinks.as_ref().unwrap(); + assert_eq!(uplinks.len(), 1); // Duplicate should be removed, only one entry + + // Test with non-existent switch port + let params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "invalid-switch-port".parse().unwrap(), + description: "Multicast pool with invalid switch port".to_string(), + }, + IpVersion::V4, + Some(vec!["switch1.qsfp0".parse().unwrap()]), // switch1 doesn't exist + VlanID::new(300).ok(), + ); + + // Should fail with 400 error about switch port not found + let error = object_create_error( + client, + "/v1/system/ip-pools", + ¶ms, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("switch1.qsfp0") + && error.message.contains("not found") + ); +} + +/// Test ASM/SSM multicast pool validation - ensure pools cannot mix ASM and SSM ranges +#[nexus_test] +async fn test_multicast_pool_asm_ssm_validation( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + // Create pure ASM multicast pool + let asm_pool_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "asm-pool".parse().unwrap(), + description: "Pure ASM multicast pool".to_string(), + }, 
+ IpVersion::V4, + Some(vec!["switch0.qsfp0".parse().unwrap()]), + VlanID::new(100).ok(), + ); + let asm_pool: IpPool = + object_create(client, "/v1/system/ip-pools", &asm_pool_params).await; + + // Add ASM range (224.x.x.x) - should succeed + let asm_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 1, 0, 1), + std::net::Ipv4Addr::new(224, 1, 0, 50), + ) + .unwrap(), + ); + let add_asm_url = + format!("/v1/system/ip-pools/{}/ranges/add", asm_pool.identity.name); + object_create::(client, &add_asm_url, &asm_range) + .await; + + // Try to add SSM range (232.x.x.x) to ASM pool - should fail + let ssm_range = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(232, 1, 0, 1), + std::net::Ipv4Addr::new(232, 1, 0, 50), + ) + .unwrap(), + ); + let error = object_create_error( + client, + &add_asm_url, + &ssm_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("Cannot mix") + && error.message.contains("ASM") + && error.message.contains("SSM"), + "Expected ASM/SSM mixing error, got: {}", + error.message + ); + + // Create pure SSM multicast pool + let ssm_pool_params = IpPoolCreate::new_multicast( + IdentityMetadataCreateParams { + name: "ssm-pool".parse().unwrap(), + description: "Pure SSM multicast pool".to_string(), + }, + IpVersion::V4, + Some(vec!["switch0.qsfp0".parse().unwrap()]), + VlanID::new(200).ok(), + ); + let ssm_pool: IpPool = + object_create(client, "/v1/system/ip-pools", &ssm_pool_params).await; + + // Add SSM range (232.x.x.x) - should succeed + let add_ssm_url = + format!("/v1/system/ip-pools/{}/ranges/add", ssm_pool.identity.name); + object_create::(client, &add_ssm_url, &ssm_range) + .await; + + // Try to add ASM range (224.x.x.x) to SSM pool - should fail + let error = object_create_error( + client, + &add_ssm_url, + &asm_range, + StatusCode::BAD_REQUEST, + ) + .await; + assert!( + error.message.contains("Cannot mix") + && error.message.contains("ASM") + && error.message.contains("SSM"), + 
"Expected ASM/SSM mixing error, got: {}", + error.message + ); + + // Note: IPv6 multicast ranges are not yet supported in the system, + // so we focus on IPv4 validation for now + + // Verify that multiple ranges of the same type can be added + let asm_range2 = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(224, 2, 0, 1), + std::net::Ipv4Addr::new(224, 2, 0, 50), + ) + .unwrap(), + ); + object_create::(client, &add_asm_url, &asm_range2) + .await; + + let ssm_range2 = IpRange::V4( + Ipv4Range::new( + std::net::Ipv4Addr::new(232, 2, 0, 1), + std::net::Ipv4Addr::new(232, 2, 0, 50), + ) + .unwrap(), + ); + object_create::(client, &add_ssm_url, &ssm_range2) + .await; +} diff --git a/nexus/types/src/external_api/deserializers.rs b/nexus/types/src/external_api/deserializers.rs new file mode 100644 index 00000000000..cc802613f70 --- /dev/null +++ b/nexus/types/src/external_api/deserializers.rs @@ -0,0 +1,112 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Deserializer utilities for API parameter types + +use std::fmt; + +use serde::{ + Deserializer, + de::{self, Visitor}, +}; + +use crate::external_api::params::SwitchPortUplink; + +/// Deserializes an optional `Vec` into `Vec` with deduplication. +/// +/// This deserializer handles both string and object formats: +/// - String format: "switch0.qsfp0" (from real API calls) +/// - Object format: {"switch_location": "switch0", "port_name": "qsfp0"} (from test serialization) +/// +/// Duplicates are automatically removed based on the string representation. 
+pub fn parse_and_dedup_switch_port_uplinks<'de, D>(
+    deserializer: D,
+) -> Result<Option<Vec<SwitchPortUplink>>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    struct SwitchPortUplinksVisitor;
+
+    impl<'de> Visitor<'de> for SwitchPortUplinksVisitor {
+        type Value = Option<Vec<SwitchPortUplink>>;
+
+        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+            formatter.write_str("an optional array of switch port uplinks")
+        }
+
+        fn visit_none<E>(self) -> Result<Self::Value, E>
+        where
+            E: de::Error,
+        {
+            Ok(None)
+        }
+
+        fn visit_unit<E>(self) -> Result<Self::Value, E>
+        where
+            E: de::Error,
+        {
+            Ok(None)
+        }
+
+        fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+        where
+            D: serde::Deserializer<'de>,
+        {
+            let vec =
+                deserializer.deserialize_seq(SwitchPortUplinksSeqVisitor)?;
+            Ok(Some(vec))
+        }
+    }
+
+    struct SwitchPortUplinksSeqVisitor;
+
+    impl<'de> Visitor<'de> for SwitchPortUplinksSeqVisitor {
+        type Value = Vec<SwitchPortUplink>;
+
+        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+            formatter.write_str("an array of switch port uplinks")
+        }
+
+        fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+        where
+            A: de::SeqAccess<'de>,
+        {
+            let mut seen = std::collections::HashSet::new();
+            let mut result = Vec::new();
+
+            while let Some(item) = seq.next_element::<serde_json::Value>()? {
+                let uplink = match item {
+                    // Handle string format: "switch0.qsfp0"
+                    serde_json::Value::String(s) => {
+                        if !seen.insert(s.clone()) {
+                            continue; // Skip duplicate
+                        }
+                        s.parse::<SwitchPortUplink>()
+                            .map_err(|e| de::Error::custom(e))?
+ } + // Handle object format: {"switch_location": "switch0", "port_name": "qsfp0"} + serde_json::Value::Object(_) => { + let uplink: SwitchPortUplink = + serde_json::from_value(item) + .map_err(|e| de::Error::custom(e))?; + let uplink_str = uplink.to_string(); + if !seen.insert(uplink_str) { + continue; // Skip duplicate + } + uplink + } + _ => { + return Err(de::Error::custom( + "expected string or object", + )); + } + }; + result.push(uplink); + } + Ok(result) + } + } + + deserializer.deserialize_option(SwitchPortUplinksVisitor) +} diff --git a/nexus/types/src/external_api/mod.rs b/nexus/types/src/external_api/mod.rs index 363ddd3f41d..d2943fb157c 100644 --- a/nexus/types/src/external_api/mod.rs +++ b/nexus/types/src/external_api/mod.rs @@ -2,6 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. +mod deserializers; pub mod headers; pub mod params; pub mod shared; diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 8a1cd6f6fa7..39d090e5eec 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -17,6 +17,7 @@ use omicron_common::api::external::{ Nullable, PaginationOrder, RouteDestination, RouteTarget, UserId, }; use omicron_common::disk::DiskVariant; +use omicron_common::vlan::VlanID; use omicron_uuid_kinds::*; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use parse_display::Display; @@ -1007,6 +1008,57 @@ pub struct IpPoolCreate { /// The default is IPv4. #[serde(default = "IpVersion::v4")] pub ip_version: IpVersion, + /// Type of IP pool (defaults to Unicast for backward compatibility) + #[serde(default)] + pub pool_type: shared::IpPoolType, + /// Rack switch uplinks that carry multicast traffic out of the rack to + /// external groups. Only applies to multicast pools; ignored for unicast + /// pools. 
+    ///
+    /// Format: list of `<switch>.<port>` strings (for example, `switch0.qsfp0`),
+    /// or objects with `switch_location` and `port_name`.
+    #[serde(
+        default,
+        skip_serializing_if = "Option::is_none",
+        deserialize_with = "crate::external_api::deserializers::parse_and_dedup_switch_port_uplinks"
+    )]
+    pub switch_port_uplinks: Option<Vec<SwitchPortUplink>>,
+    /// VLAN ID for multicast pools.
+    /// Only applies to multicast pools, ignored for unicast pools.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mvlan: Option<VlanID>,
+}
+
+impl IpPoolCreate {
+    /// Create parameters for a unicast IP pool (the default)
+    pub fn new(
+        identity: IdentityMetadataCreateParams,
+        ip_version: IpVersion,
+    ) -> Self {
+        Self {
+            identity,
+            ip_version,
+            pool_type: shared::IpPoolType::Unicast,
+            switch_port_uplinks: None,
+            mvlan: None,
+        }
+    }
+
+    /// Create parameters for a multicast IP pool
+    pub fn new_multicast(
+        identity: IdentityMetadataCreateParams,
+        ip_version: IpVersion,
+        switch_port_uplinks: Option<Vec<SwitchPortUplink>>,
+        mvlan: Option<VlanID>,
+    ) -> Self {
+        Self {
+            identity,
+            ip_version,
+            pool_type: shared::IpPoolType::Multicast,
+            switch_port_uplinks,
+            mvlan,
+        }
+    }
+}
 
 /// Parameters for updating an IP Pool
@@ -1014,6 +1066,22 @@ pub struct IpPoolCreate {
 pub struct IpPoolUpdate {
     #[serde(flatten)]
     pub identity: IdentityMetadataUpdateParams,
+    /// Rack switch uplinks that carry multicast traffic out of the rack to
+    /// external groups. Only applies to multicast pools; ignored for unicast
+    /// pools.
+    ///
+    /// Format: list of `<switch>.<port>` strings (for example, `switch0.qsfp0`),
+    /// or objects with `switch_location` and `port_name`.
+    #[serde(
+        default,
+        skip_serializing_if = "Option::is_none",
+        deserialize_with = "crate::external_api::deserializers::parse_and_dedup_switch_port_uplinks"
+    )]
+    pub switch_port_uplinks: Option<Vec<SwitchPortUplink>>,
+    /// VLAN ID for multicast pools.
+    /// Only applies to multicast pools, ignored for unicast pools.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mvlan: Option<VlanID>,
 }
 
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
@@ -2252,6 +2320,45 @@ pub struct SwitchPortPageSelector {
     pub switch_port_id: Option<Uuid>,
 }
 
+/// Switch port uplink specification for multicast IP pools.
+/// Combines switch location and port name in "switchN.portM" format.
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)]
+pub struct SwitchPortUplink {
+    /// Switch location (e.g., "switch0")
+    pub switch_location: Name,
+    /// Port name (e.g., "qsfp0")
+    pub port_name: Name,
+}
+
+impl std::fmt::Display for SwitchPortUplink {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}.{}", self.switch_location, self.port_name)
+    }
+}
+
+impl FromStr for SwitchPortUplink {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let parts: Vec<&str> = s.split('.').collect();
+        if parts.len() != 2 {
+            return Err(format!(
+                "Invalid switch port format '{}'. Expected '<switch_location>.<port_name>'",
+                s
+            ));
+        }
+
+        let switch_location = parts[0].parse::<Name>().map_err(|e| {
+            format!("Invalid switch location '{}': {}", parts[0], e)
+        })?;
+        let port_name = parts[1]
+            .parse::<Name>()
+            .map_err(|e| format!("Invalid port name '{}': {}", parts[1], e))?;
+
+        Ok(Self { switch_location, port_name })
+    }
+}
+
 /// Parameters for applying settings to switch ports.
 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)]
 pub struct SwitchPortApplySettings {
diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs
index 3fb42b3c224..8248a1b3aae 100644
--- a/nexus/types/src/external_api/shared.rs
+++ b/nexus/types/src/external_api/shared.rs
@@ -732,3 +732,19 @@ impl RelayState {
         .context("json from relay state string")
     }
 }
+
+/// Type of IP pool
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)]
+#[serde(rename_all = "snake_case")]
+pub enum IpPoolType {
+    /// Unicast IP pool for standard IP allocations
+    Unicast,
+    /// Multicast IP pool for multicast group allocations
+    Multicast,
+}
+
+impl Default for IpPoolType {
+    fn default() -> Self {
+        Self::Unicast
+    }
+}
diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs
index 321b29bd12d..8e10f35661f 100644
--- a/nexus/types/src/external_api/views.rs
+++ b/nexus/types/src/external_api/views.rs
@@ -394,6 +394,16 @@ pub struct IpPool {
     pub identity: IdentityMetadata,
     /// The IP version for the pool.
     pub ip_version: IpVersion,
+    /// Type of IP pool (unicast or multicast)
+    pub pool_type: shared::IpPoolType,
+    /// Switch port uplinks for multicast pools (format: "switchN.portM")
+    /// Only present for multicast pools.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub switch_port_uplinks: Option<Vec<String>>,
+    /// MVLAN ID for multicast pools
+    /// Only present for multicast pools.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub mvlan: Option<u16>,
 }
 
 /// The utilization of IP addresses in a pool.
diff --git a/openapi/nexus.json b/openapi/nexus.json index d88de5977b9..bcfe5e91f78 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -21293,6 +21293,13 @@ } ] }, + "mvlan": { + "nullable": true, + "description": "MVLAN ID for multicast pools Only present for multicast pools.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -21301,6 +21308,22 @@ } ] }, + "pool_type": { + "description": "Type of IP pool (unicast or multicast)", + "allOf": [ + { + "$ref": "#/components/schemas/IpPoolType" + } + ] + }, + "switch_port_uplinks": { + "nullable": true, + "description": "Switch port uplinks for multicast pools (format: \"switchN.portM\") Only present for multicast pools.", + "type": "array", + "items": { + "type": "string" + } + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -21317,6 +21340,7 @@ "id", "ip_version", "name", + "pool_type", "time_created", "time_modified" ] @@ -21337,8 +21361,34 @@ } ] }, + "mvlan": { + "nullable": true, + "description": "VLAN ID for multicast pools. Only applies to multicast pools, ignored for unicast pools.", + "allOf": [ + { + "$ref": "#/components/schemas/VlanId" + } + ] + }, "name": { "$ref": "#/components/schemas/Name" + }, + "pool_type": { + "description": "Type of IP pool (defaults to Unicast for backward compatibility)", + "default": "unicast", + "allOf": [ + { + "$ref": "#/components/schemas/IpPoolType" + } + ] + }, + "switch_port_uplinks": { + "nullable": true, + "description": "Rack switch uplinks that carry multicast traffic out of the rack to external groups. 
Only applies to multicast pools; ignored for unicast pools.\n\nFormat: list of `.` strings (for example, `switch0.qsfp0`), or objects with `switch_location` and `port_name`.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortUplink" + } } }, "required": [ @@ -21486,6 +21536,25 @@ "is_default" ] }, + "IpPoolType": { + "description": "Type of IP pool", + "oneOf": [ + { + "description": "Unicast IP pool for standard IP allocations", + "type": "string", + "enum": [ + "unicast" + ] + }, + { + "description": "Multicast IP pool for multicast group allocations", + "type": "string", + "enum": [ + "multicast" + ] + } + ] + }, "IpPoolUpdate": { "description": "Parameters for updating an IP Pool", "type": "object", @@ -21494,6 +21563,15 @@ "nullable": true, "type": "string" }, + "mvlan": { + "nullable": true, + "description": "VLAN ID for multicast pools. Only applies to multicast pools, ignored for unicast pools.", + "allOf": [ + { + "$ref": "#/components/schemas/VlanId" + } + ] + }, "name": { "nullable": true, "allOf": [ @@ -21501,6 +21579,14 @@ "$ref": "#/components/schemas/Name" } ] + }, + "switch_port_uplinks": { + "nullable": true, + "description": "Rack switch uplinks that carry multicast traffic out of the rack to external groups. Only applies to multicast pools; ignored for unicast pools.\n\nFormat: list of `.` strings (for example, `switch0.qsfp0`), or objects with `switch_location` and `port_name`.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortUplink" + } } } }, @@ -25644,6 +25730,32 @@ "items" ] }, + "SwitchPortUplink": { + "description": "Switch port uplink specification for multicast IP pools. 
Combines switch location and port name in \"switchN.portM\" format.", + "type": "object", + "properties": { + "port_name": { + "description": "Port name (e.g., \"qsfp0\")", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "switch_location": { + "description": "Switch location (e.g., \"switch0\")", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + }, + "required": [ + "port_name", + "switch_location" + ] + }, "SwitchResultsPage": { "description": "A single page of results", "type": "object", @@ -26687,6 +26799,12 @@ "storage" ] }, + "VlanId": { + "description": "Wrapper around a VLAN ID, ensuring it is valid.", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, "Vni": { "description": "A Geneve Virtual Network Identifier", "type": "integer", diff --git a/package-manifest.toml b/package-manifest.toml index 911f70ae33c..664b3b86177 100644 --- a/package-manifest.toml +++ b/package-manifest.toml @@ -735,8 +735,8 @@ only_for_targets.image = "standard" # the other `source.*` keys. source.type = "prebuilt" source.repo = "dendrite" -source.commit = "738c80d18d5e94eda367440ade7743e9d9f124de" -source.sha256 = "cc78c4fa4f863df62eda1f90175f3a7ffe1b34b7bb6a95bed869c2df5e6c4a08" +source.commit = "6ba23e71121c196e1e3c4e0621ba7a6f046237c7" +source.sha256 = "e8e534600ae180feec51f4aa6ff44c22c81f4c98558b84c2ca35e87845aebc4c" output.type = "zone" output.intermediate_only = true @@ -762,8 +762,8 @@ only_for_targets.image = "standard" # the other `source.*` keys. source.type = "prebuilt" source.repo = "dendrite" -source.commit = "738c80d18d5e94eda367440ade7743e9d9f124de" -source.sha256 = "55376e97f2b5695475275f78b8b3d2c8bad1100df13a75746fe82ad43e786082" +source.commit = "6ba23e71121c196e1e3c4e0621ba7a6f046237c7" +source.sha256 = "02d9c38511b16d099dab21dab7d05ab51c3d64297199037b144c99847751f960" output.type = "zone" output.intermediate_only = true @@ -782,8 +782,8 @@ only_for_targets.image = "standard" # the other `source.*` keys. 
source.type = "prebuilt" source.repo = "dendrite" -source.commit = "738c80d18d5e94eda367440ade7743e9d9f124de" -source.sha256 = "f2d3f38100fd49fff3884512ecfeb92c4a1d079de2c862b869c8aa83c75ba640" +source.commit = "6ba23e71121c196e1e3c4e0621ba7a6f046237c7" +source.sha256 = "173149a1044328df0259e33e02cf548f6e24197b29f10096bddcbc5544080b01" output.type = "zone" output.intermediate_only = true diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 5126e39944b..cdf0b25a845 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2086,6 +2086,14 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_version AS ENUM ( 'v6' ); +/* + * IP pool types for unicast vs multicast pools + */ +CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_type AS ENUM ( + 'unicast', + 'multicast' +); + /* * An IP Pool, a collection of zero or more IP ranges for external IPs. */ @@ -2102,7 +2110,19 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( rcgen INT8 NOT NULL, /* The IP version of the ranges contained in this pool. */ - ip_version omicron.public.ip_version NOT NULL + ip_version omicron.public.ip_version NOT NULL, + + /* Pool type for unicast (default) vs multicast pools. */ + pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast', + + /* Rack switch uplinks that carry multicast traffic out of the rack to */ + /* external groups. Only applies to multicast pools (operator-configured). */ + /* Stored as switch port UUIDs. NULL for unicast pools. */ + switch_port_uplinks UUID[], + + /* MVLAN ID for multicast pools. */ + /* Only applies to multicast pools, NULL for unicast pools. */ + mvlan INT4 ); /* @@ -2113,6 +2133,14 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_pool_by_name ON omicron.public.ip_pool ) WHERE time_deleted IS NULL; +/* + * Index on pool type for efficient filtering of unicast vs multicast pools. 
+ */ +CREATE INDEX IF NOT EXISTS lookup_ip_pool_by_type ON omicron.public.ip_pool ( + pool_type +) WHERE + time_deleted IS NULL; + -- The order here is most-specific first, and it matters because we use this -- fact to select the most specific default in the case where there is both a -- silo default and a fleet default. If we were to add a project type, it should @@ -6688,7 +6716,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '193.0.0', NULL) + (TRUE, NOW(), NOW(), '194.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/multicast-pool-support/up01.sql b/schema/crdb/multicast-pool-support/up01.sql new file mode 100644 index 00000000000..c6ea0f0b830 --- /dev/null +++ b/schema/crdb/multicast-pool-support/up01.sql @@ -0,0 +1,30 @@ +-- IP Pool multicast support: Add pool types for unicast vs multicast pools + +-- Add IP pool type for unicast vs multicast pools +CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_type AS ENUM ( + 'unicast', + 'multicast' +); + +-- Add pool type column to ip_pool table +-- Defaults to 'unicast' for existing pools +ALTER TABLE omicron.public.ip_pool + ADD COLUMN IF NOT EXISTS pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast'; + +-- Add switch port uplinks for multicast pools (array of switch port UUIDs) +-- Only applies to multicast pools for static (operator) configuration +-- Always NULL for unicast pools +ALTER TABLE omicron.public.ip_pool + ADD COLUMN IF NOT EXISTS switch_port_uplinks UUID[]; + +-- Add MVLAN ID for multicast pools +-- Only applies to multicast pools for static (operator) configuration +-- Always NULL for unicast pools +ALTER TABLE omicron.public.ip_pool + ADD COLUMN IF NOT EXISTS mvlan INT4; + +-- Add index on pool_type for efficient filtering +CREATE INDEX IF NOT EXISTS lookup_ip_pool_by_type ON omicron.public.ip_pool ( + pool_type +) WHERE + time_deleted IS NULL; diff --git a/tools/dendrite_stub_checksums 
b/tools/dendrite_stub_checksums index df90dcf5760..872d2b322a2 100644 --- a/tools/dendrite_stub_checksums +++ b/tools/dendrite_stub_checksums @@ -1,3 +1,3 @@ -CIDL_SHA256_ILLUMOS="cc78c4fa4f863df62eda1f90175f3a7ffe1b34b7bb6a95bed869c2df5e6c4a08" -CIDL_SHA256_LINUX_DPD="c806645b8bfa2b605c4cb48c33a7470ba91c82696df59738518087f92f4bb2e0" -CIDL_SHA256_LINUX_SWADM="d59294cd4094c10c50341bf94deebccf91376a7e377c5a3b0113344b8841510a" +CIDL_SHA256_ILLUMOS="e8e534600ae180feec51f4aa6ff44c22c81f4c98558b84c2ca35e87845aebc4c" +CIDL_SHA256_LINUX_DPD="85b7979e462b6287dbf7613c3c874c99a6c60027575be7677fd29b41453751c8" +CIDL_SHA256_LINUX_SWADM="1923174147be3c6787df2522027749c2971c4fc1bf9463f0e132f6592105a2f8" diff --git a/tools/dendrite_version b/tools/dendrite_version index 407a104707b..f232d3d6d05 100644 --- a/tools/dendrite_version +++ b/tools/dendrite_version @@ -1 +1 @@ -COMMIT="738c80d18d5e94eda367440ade7743e9d9f124de" +COMMIT="6ba23e71121c196e1e3c4e0621ba7a6f046237c7" From bcb4fc6a8d3510038016df585aac17fba67387d6 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Thu, 9 Oct 2025 14:51:02 +0000 Subject: [PATCH 2/7] [review] move mvlan and switch port uplinks (for mcast egress) out of pools --- docs/control-plane-architecture.adoc | 2 + docs/networking.adoc | 2 + end-to-end-tests/src/bin/bootstrap.rs | 2 - end-to-end-tests/src/bin/commtest.rs | 2 - nexus/db-model/src/ip_pool.rs | 35 -- nexus/db-queries/src/db/datastore/ip_pool.rs | 10 +- nexus/db-schema/src/schema.rs | 2 - nexus/src/app/ip_pool.rs | 147 +----- nexus/src/external_api/http_entrypoints.rs | 3 +- nexus/tests/integration_tests/endpoints.rs | 2 - nexus/tests/integration_tests/ip_pools.rs | 474 +----------------- nexus/types/src/external_api/deserializers.rs | 112 ----- nexus/types/src/external_api/mod.rs | 1 - nexus/types/src/external_api/params.rs | 78 --- nexus/types/src/external_api/views.rs | 8 - openapi/nexus.json | 81 --- schema/crdb/dbinit.sql | 11 +- schema/crdb/multicast-pool-support/up01.sql | 12 - 18 files 
changed, 15 insertions(+), 969 deletions(-) delete mode 100644 nexus/types/src/external_api/deserializers.rs diff --git a/docs/control-plane-architecture.adoc b/docs/control-plane-architecture.adoc index 88b91cb7b30..12ecc6999a3 100644 --- a/docs/control-plane-architecture.adoc +++ b/docs/control-plane-architecture.adoc @@ -14,6 +14,8 @@ NOTE: Much of this material originally came from <> and <>. This NOTE: The RFD references in this documentation may be Oxide-internal. Where possible, we're trying to move relevant documentation from those RFDs into docs here. +See also: link:../notes/multicast-architecture.adoc[Multicast Architecture: VLAN Scope] + == What is the control plane In software systems the terms **data plane** and **control plane** are often used to refer to the parts of the system that directly provide resources to users (the data plane) and the parts that support the configuration, control, monitoring, and operation of the system (the control plane). Within the Oxide system, we say that the data plane comprises those parts that provide CPU resources (including both the host CPU and hypervisor software), storage resources, and network resources. The control plane provides the APIs through which users provision, configure, and monitor these resources and the mechanisms through which these APIs are implemented. Also part of the control plane are the APIs and facilities through which operators manage the system itself, including fault management, alerting, software updates for various components of the system, and so on. diff --git a/docs/networking.adoc b/docs/networking.adoc index 84c95832c0d..9d4d1ea6936 100644 --- a/docs/networking.adoc +++ b/docs/networking.adoc @@ -6,6 +6,8 @@ This is a very rough introduction to how networking works within the Oxide system and particularly the control plane (Omicron). Much more information is available in various RFDs, particularly <>. 
+See also: link:../notes/multicast-architecture.adoc[Multicast Architecture: VLAN Scope] + == IPv6: the least you need to know While IPv4 can be used for connectivity between Omicron and the outside world, everything else in the system uses IPv6. This section provides a _very_ cursory introduction to IPv6 for people only familiar with IPv4. You can skip this if you know IPv6. If you want slightly more detail than what's here, see https://www.roesen.org/files/ipv6_cheat_sheet.pdf[this cheat sheet]. diff --git a/end-to-end-tests/src/bin/bootstrap.rs b/end-to-end-tests/src/bin/bootstrap.rs index a62d664decd..5aa9cf22f7f 100644 --- a/end-to-end-tests/src/bin/bootstrap.rs +++ b/end-to-end-tests/src/bin/bootstrap.rs @@ -53,9 +53,7 @@ async fn run_test() -> Result<()> { name: pool_name.parse().unwrap(), description: "Default IP pool".to_string(), ip_version, - mvlan: None, pool_type: IpPoolType::Unicast, - switch_port_uplinks: None, }) .send() .await?; diff --git a/end-to-end-tests/src/bin/commtest.rs b/end-to-end-tests/src/bin/commtest.rs index 6597d187b9f..2fae239db59 100644 --- a/end-to-end-tests/src/bin/commtest.rs +++ b/end-to-end-tests/src/bin/commtest.rs @@ -295,9 +295,7 @@ async fn rack_prepare( name: pool_name.parse().unwrap(), description: "Default IP pool".to_string(), ip_version, - mvlan: None, pool_type: IpPoolType::Unicast, - switch_port_uplinks: None, }) .send() .await?; diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 4728c97ae3c..817206ed24d 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -5,7 +5,6 @@ //! Model types for IP Pools and the CIDR blocks therein. 
use crate::Name; -use crate::SqlU16; use crate::collection::DatastoreCollectionConfig; use crate::impl_enum_type; use chrono::DateTime; @@ -21,7 +20,6 @@ use nexus_types::external_api::shared; use nexus_types::external_api::views; use nexus_types::identity::Resource; use omicron_common::api::external; -use omicron_common::vlan::VlanID; use std::net::IpAddr; use uuid::Uuid; @@ -105,12 +103,6 @@ pub struct IpPool { pub ip_version: IpVersion, /// Pool type for unicast (default) vs multicast pools. pub pool_type: IpPoolType, - /// Switch port uplinks for multicast pools (array of switch port UUIDs). - /// Only applies to multicast pools, None for unicast pools. - pub switch_port_uplinks: Option>, - /// MVLAN ID for multicast pools. - /// Only applies to multicast pools, None for unicast pools. - pub mvlan: Option, /// Child resource generation number, for optimistic concurrency control of /// the contained ranges. pub rcgen: i64, @@ -129,8 +121,6 @@ impl IpPool { ), ip_version, pool_type: IpPoolType::Unicast, - switch_port_uplinks: None, - mvlan: None, rcgen: 0, } } @@ -139,8 +129,6 @@ impl IpPool { pub fn new_multicast( pool_identity: &external::IdentityMetadataCreateParams, ip_version: IpVersion, - switch_port_uplinks: Option>, - mvlan: Option, ) -> Self { Self { identity: IpPoolIdentity::new( @@ -149,8 +137,6 @@ impl IpPool { ), ip_version, pool_type: IpPoolType::Multicast, - switch_port_uplinks, - mvlan: mvlan.map(|vid| u16::from(vid).into()), rcgen: 0, } } @@ -173,23 +159,10 @@ impl From for views::IpPool { let identity = pool.identity(); let pool_type = pool.pool_type; - // Note: UUIDs expected to be converted to "switch.port" format in app - // layer, upon retrieval. 
- let switch_port_uplinks = match pool.switch_port_uplinks { - Some(uuid_list) => Some( - uuid_list.into_iter().map(|uuid| uuid.to_string()).collect(), - ), - None => None, - }; - - let mvlan = pool.mvlan.map(|vlan| vlan.into()); - Self { identity, pool_type: pool_type.into(), ip_version: pool.ip_version.into(), - switch_port_uplinks, - mvlan, } } } @@ -203,22 +176,14 @@ impl From for views::IpPool { pub struct IpPoolUpdate { pub name: Option, pub description: Option, - /// Switch port uplinks for multicast pools (array of switch port UUIDs), - /// used for multicast traffic outbound from the rack to external networks. - pub switch_port_uplinks: Option>, - /// MVLAN ID for multicast pools. - pub mvlan: Option, pub time_modified: DateTime, } -// Used for unicast updates. impl From for IpPoolUpdate { fn from(params: params::IpPoolUpdate) -> Self { Self { name: params.identity.name.map(|n| n.into()), description: params.identity.description, - switch_port_uplinks: None, // no change - mvlan: None, // no change time_modified: Utc::now(), } } diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index 6c2c4ca5557..7b8e421ed5d 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -1818,8 +1818,6 @@ mod test { ip_version, rcgen: 0, pool_type: IpPoolType::Unicast, - mvlan: None, - switch_port_uplinks: None, }; let pool = datastore .ip_pool_create(&opctx, params) @@ -2187,7 +2185,7 @@ mod test { let pool = datastore .ip_pool_create( &opctx, - IpPool::new_multicast(&identity, IpVersion::V4, None, None), + IpPool::new_multicast(&identity, IpVersion::V4), ) .await .expect("Failed to create multicast IP pool"); @@ -2257,7 +2255,7 @@ mod test { let pool = datastore .ip_pool_create( &opctx, - IpPool::new_multicast(&identity, IpVersion::V4, None, None), + IpPool::new_multicast(&identity, IpVersion::V4), ) .await .expect("Failed to create multicast IP pool"); @@ -2306,8 
+2304,6 @@ mod test { IpPool::new_multicast( &ipv4_identity, IpVersion::V4, - None, - None, ), ) .await @@ -2348,8 +2344,6 @@ mod test { IpPool::new_multicast( &ipv6_identity, IpVersion::V6, - None, - None, ), ) .await diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs index 175e581863a..f58f3cca620 100644 --- a/nexus/db-schema/src/schema.rs +++ b/nexus/db-schema/src/schema.rs @@ -630,8 +630,6 @@ table! { time_deleted -> Nullable, ip_version -> crate::enums::IpVersionEnum, pool_type -> crate::enums::IpPoolTypeEnum, - switch_port_uplinks -> Nullable>, - mvlan -> Nullable, rcgen -> Int8, } } diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 3b40b6938ac..24c70c89fdf 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -6,8 +6,6 @@ use crate::external_api::params; use crate::external_api::shared; -use crate::external_api::views; -use chrono::Utc; use ipnetwork::IpNetwork; use nexus_db_lookup::LookupPath; use nexus_db_lookup::lookup; @@ -80,38 +78,12 @@ impl super::Nexus { // https://github.com/oxidecomputer/omicron/issues/8881 let ip_version = pool_params.ip_version.into(); - let pool = match ( - pool_params.pool_type.clone(), - pool_params.switch_port_uplinks.is_some(), - ) { - (shared::IpPoolType::Unicast, true) => { - return Err(Error::invalid_request( - "switch_port_uplinks are only allowed for multicast IP pools", - )); - } - (shared::IpPoolType::Unicast, false) => { - if pool_params.mvlan.is_some() { - return Err(Error::invalid_request( - "mvlan is only allowed for multicast IP pools", - )); - } + let pool = match pool_params.pool_type.clone() { + shared::IpPoolType::Unicast => { IpPool::new(&pool_params.identity, ip_version) } - (shared::IpPoolType::Multicast, _) => { - let switch_port_ids = self - .resolve_switch_port_ids( - opctx, - self.rack_id(), - &pool_params.switch_port_uplinks, - ) - .await?; - - IpPool::new_multicast( - &pool_params.identity, - ip_version, - switch_port_ids, - 
pool_params.mvlan, - ) + shared::IpPoolType::Multicast => { + IpPool::new_multicast(&pool_params.identity, ip_version) } }; @@ -316,21 +288,7 @@ impl super::Nexus { return Err(not_found_from_lookup(pool_lookup)); } - let switch_port_ids = self - .resolve_switch_port_ids( - opctx, - self.rack_id(), - &updates.switch_port_uplinks, - ) - .await?; - - let updates_db = IpPoolUpdate { - name: updates.identity.name.clone().map(Into::into), - description: updates.identity.description.clone(), - switch_port_uplinks: switch_port_ids, - mvlan: updates.mvlan.map(|vid| u16::from(vid).into()), - time_modified: Utc::now(), - }; + let updates_db = IpPoolUpdate::from(updates.clone()); self.db_datastore.ip_pool_update(opctx, &authz_pool, updates_db).await } @@ -544,99 +502,4 @@ impl super::Nexus { opctx.authorize(authz::Action::Modify, &authz_pool).await?; self.db_datastore.ip_pool_delete_range(opctx, &authz_pool, range).await } - - async fn resolve_switch_port_ids( - &self, - opctx: &OpContext, - rack_id: Uuid, - uplinks: &Option>, - ) -> Result>, Error> { - match uplinks { - None => Ok(None), - Some(list) => { - let mut ids = Vec::with_capacity(list.len()); - - for uplink in list { - let switch_location = - Name::from(uplink.switch_location.clone()); - let port_name = Name::from(uplink.port_name.clone()); - let id = self - .db_datastore - .switch_port_get_id( - opctx, - rack_id, - switch_location, - port_name, - ) - .await - .map_err(|_| { - Error::invalid_value( - "switch_port_uplinks", - format!("Switch port '{}' not found", uplink), - ) - })?; - ids.push(id); - } - Ok(Some(ids)) - } - } - } - - /// Convert IP pool with proper switch port name resolution in an async - /// context. 
- pub(crate) async fn ip_pool_to_view( - &self, - opctx: &OpContext, - pool: db::model::IpPool, - ) -> Result { - let identity = pool.identity(); - let pool_type = pool.pool_type; - - // Convert switch port UUIDs to "switch.port" format - let switch_port_uplinks = self - .resolve_switch_port_names(opctx, &pool.switch_port_uplinks) - .await?; - - let mvlan = pool.mvlan.map(|vlan| vlan.into()); - - Ok(views::IpPool { - identity, - ip_version: pool.ip_version.into(), - pool_type: pool_type.into(), - switch_port_uplinks, - mvlan, - }) - } - - // Convert switch port UUIDs to "switch.port" format for views - async fn resolve_switch_port_names( - &self, - opctx: &OpContext, - switch_port_ids: &Option>, - ) -> Result>, Error> { - match switch_port_ids { - None => Ok(None), - Some(ids) => { - let mut names = Vec::with_capacity(ids.len()); - for &id in ids { - let switch_port = self - .db_datastore - .switch_port_get(opctx, id) - .await - .map_err(|_| { - Error::internal_error(&format!( - "Switch port with ID {} not found", - id - )) - })?; - let name = format!( - "{}.{}", - switch_port.switch_location, switch_port.port_name - ); - names.push(name); - } - Ok(Some(names)) - } - } - } } diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 9ae8ad81e69..3275acd49e9 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -1775,8 +1775,7 @@ impl NexusExternalApi for NexusExternalApiImpl { let opctx = crate::context::op_context_for_external_api(&rqctx).await?; let pool = nexus.ip_pool_create(&opctx, &pool_params).await?; - let pool_view = nexus.ip_pool_to_view(&opctx, pool).await?; - Ok(HttpResponseCreated(pool_view)) + Ok(HttpResponseCreated(pool.into())) }; apictx .context diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index 3e8f4b503fd..1500e6158ae 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ 
b/nexus/tests/integration_tests/endpoints.rs @@ -953,8 +953,6 @@ pub static DEMO_IP_POOL_UPDATE: LazyLock = name: None, description: Some(String::from("a new IP pool")), }, - mvlan: None, - switch_port_uplinks: None, }); pub static DEMO_IP_POOL_SILOS_URL: LazyLock = LazyLock::new(|| format!("{}/silos", *DEMO_IP_POOL_URL)); diff --git a/nexus/tests/integration_tests/ip_pools.rs b/nexus/tests/integration_tests/ip_pools.rs index fe86208bac8..9c16e8cfb4d 100644 --- a/nexus/tests/integration_tests/ip_pools.rs +++ b/nexus/tests/integration_tests/ip_pools.rs @@ -62,7 +62,6 @@ use omicron_common::api::external::InstanceState; use omicron_common::api::external::NameOrId; use omicron_common::api::external::SimpleIdentityOrName; use omicron_common::api::external::{IdentityMetadataCreateParams, Name}; -use omicron_common::vlan::VlanID; use omicron_nexus::TestInterfaces; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::InstanceUuid; @@ -177,8 +176,6 @@ async fn test_ip_pool_basic_crud(cptestctx: &ControlPlaneTestContext) { name: Some(String::from(new_pool_name).parse().unwrap()), description: None, }, - mvlan: None, - switch_port_uplinks: None, }; let modified_pool: IpPool = object_put(client, &ip_pool_url, &updates).await; @@ -386,8 +383,6 @@ async fn test_ip_pool_service_no_cud(cptestctx: &ControlPlaneTestContext) { name: Some("test".parse().unwrap()), description: Some("test".to_string()), }, - mvlan: None, - switch_port_uplinks: None, }; let error = object_put_error( client, @@ -1531,19 +1526,13 @@ fn assert_ranges_eq(first: &IpPoolRange, second: &IpPoolRange) { assert_eq!(first.range.last_address(), second.range.last_address()); } -fn assert_unicast_defaults(pool: &IpPool) { - assert_eq!(pool.pool_type, IpPoolType::Unicast); - assert!(pool.mvlan.is_none()); - assert!(pool.switch_port_uplinks.is_none()); -} - #[nexus_test] async fn test_ip_pool_unicast_defaults(cptestctx: &ControlPlaneTestContext) { let client = &cptestctx.external_client; // Test that regular 
IP pool creation uses unicast defaults let pool = create_pool(client, "unicast-test", IpVersion::V4).await; - assert_unicast_defaults(&pool); + assert_eq!(pool.pool_type, IpPoolType::Unicast); // Test that explicitly creating with default type still works let params = IpPoolCreate::new( @@ -1555,464 +1544,5 @@ async fn test_ip_pool_unicast_defaults(cptestctx: &ControlPlaneTestContext) { ); let pool: IpPool = object_create(client, "/v1/system/ip-pools", ¶ms).await; - assert_unicast_defaults(&pool); -} - -#[nexus_test] -async fn test_ip_pool_multicast_crud(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - // Create multicast IP pool - let params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "multicast-test".parse().unwrap(), - description: "Test multicast pool".to_string(), - }, - IpVersion::V4, - Some(vec!["switch0.qsfp0".parse().unwrap()]), - VlanID::new(100).ok(), - ); - - let pool: IpPool = - object_create(client, "/v1/system/ip-pools", ¶ms).await; - assert_eq!(pool.pool_type, IpPoolType::Multicast); - assert_eq!(pool.mvlan, Some(100u16)); - assert!(pool.switch_port_uplinks.is_some()); - let uplinks = pool.switch_port_uplinks.as_ref().unwrap(); - assert_eq!(uplinks.len(), 1); - // Verify view shows "switch.port" format - assert_eq!(uplinks[0], "switch0.qsfp0"); - - // Test update - change VLAN and remove uplinks - let updates = IpPoolUpdate { - identity: IdentityMetadataUpdateParams { - name: None, - description: Some("Updated multicast pool".to_string()), - }, - mvlan: VlanID::new(200).ok(), - switch_port_uplinks: Some(vec![]), // Remove all uplinks - }; - - let pool_url = "/v1/system/ip-pools/multicast-test"; - let updated_pool: IpPool = object_put(client, pool_url, &updates).await; - assert_eq!(updated_pool.mvlan, Some(200u16)); - let uplinks = updated_pool.switch_port_uplinks.as_ref().unwrap(); - assert_eq!(uplinks.len(), 0); // All uplinks removed - - // Note: Field clearing semantics would need 
to be tested separately - // as the update API uses None to mean "no change", not "clear field" -} - -#[nexus_test] -async fn test_ip_pool_multicast_ranges(cptestctx: &ControlPlaneTestContext) { - let client = &cptestctx.external_client; - - // Create IPv4 multicast pool - let params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "multicast-ipv4".parse().unwrap(), - description: "IPv4 multicast pool".to_string(), - }, - IpVersion::V4, - None, - None, - ); - - let _pool: IpPool = - object_create(client, "/v1/system/ip-pools", ¶ms).await; - let pool_url = "/v1/system/ip-pools/multicast-ipv4"; - let ranges_url = format!("{}/ranges/add", pool_url); - - // Add IPv4 multicast range (224.0.0.0/4) - let ipv4_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(224, 1, 1, 1), - std::net::Ipv4Addr::new(224, 1, 1, 10), - ) - .unwrap(), - ); - - let created_range: IpPoolRange = - object_create(client, &ranges_url, &ipv4_range).await; - assert_eq!(ipv4_range.first_address(), created_range.range.first_address()); - assert_eq!(ipv4_range.last_address(), created_range.range.last_address()); - - // Verify utilization - assert_ip_pool_utilization(client, "multicast-ipv4", 0, 10.0).await; -} - -#[nexus_test] -async fn test_ip_pool_multicast_silo_linking( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Create multicast pool - let params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "multicast-silo-test".parse().unwrap(), - description: "Multicast pool for silo linking".to_string(), - }, - IpVersion::V4, - None, - VlanID::new(300).ok(), - ); - - let _pool: IpPool = - object_create(client, "/v1/system/ip-pools", ¶ms).await; - - // Create silo to link with - let silo = - create_silo(&client, "multicast-silo", true, SiloIdentityMode::SamlJit) - .await; - - // Link multicast pool to silo - link_ip_pool(client, "multicast-silo-test", &silo.id(), true).await; - - // Verify the link 
shows up correctly - let silo_pools = pools_for_silo(client, "multicast-silo").await; - assert_eq!(silo_pools.len(), 1); - assert_eq!(silo_pools[0].identity.name, "multicast-silo-test"); - // Note: SiloIpPool doesn't expose pool_type, would need separate lookup - assert!(silo_pools[0].is_default); - - // Verify pool shows linked silo - let linked_silos = silos_for_pool(client, "multicast-silo-test").await; - assert_eq!(linked_silos.items.len(), 1); - assert_eq!(linked_silos.items[0].silo_id, silo.id()); - assert!(linked_silos.items[0].is_default); -} - -#[nexus_test] -async fn test_ip_pool_mixed_unicast_multicast( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Create one of each type - let unicast_pool = create_pool(client, "unicast", IpVersion::V4).await; - assert_unicast_defaults(&unicast_pool); - - let multicast_params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "multicast".parse().unwrap(), - description: "Multicast pool".to_string(), - }, - IpVersion::V4, - Some(vec!["switch0.qsfp0".parse().unwrap()]), - VlanID::new(400).ok(), - ); - - let multicast_pool: IpPool = - object_create(client, "/v1/system/ip-pools", &multicast_params).await; - assert_eq!(multicast_pool.pool_type, IpPoolType::Multicast); - - // List all pools - should see both types - let all_pools = get_ip_pools(client).await; - assert_eq!(all_pools.len(), 2); - - // Verify each has correct type - for pool in all_pools { - match pool.identity.name.as_str() { - "unicast" => assert_unicast_defaults(&pool), - "multicast" => assert_eq!(pool.pool_type, IpPoolType::Multicast), - _ => panic!("Unexpected pool name: {}", pool.identity.name), - } - } -} - -#[nexus_test] -async fn test_ip_pool_unicast_rejects_multicast_fields( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Try to create unicast pool with multicast-only fields - should be rejected - let mut params = IpPoolCreate::new( - 
IdentityMetadataCreateParams { - name: "invalid-unicast".parse().unwrap(), - description: "Unicast pool with invalid multicast fields" - .to_string(), - }, - IpVersion::V4, - ); - params.mvlan = VlanID::new(100).ok(); // This should be rejected for unicast - - let error = object_create_error( - client, - "/v1/system/ip-pools", - ¶ms, - StatusCode::BAD_REQUEST, - ) - .await; - assert!( - error.message.contains("mvlan") - || error.message.contains("VLAN") - || error.message.contains("unicast") - ); - - // Try to create unicast pool with uplinks - should be rejected - let mut params = IpPoolCreate::new( - IdentityMetadataCreateParams { - name: "invalid-unicast2".parse().unwrap(), - description: "Unicast pool with uplinks".to_string(), - }, - IpVersion::V4, - ); - params.switch_port_uplinks = Some(vec!["switch0.qsfp0".parse().unwrap()]); - - let error = object_create_error( - client, - "/v1/system/ip-pools", - ¶ms, - StatusCode::BAD_REQUEST, - ) - .await; - assert!( - error.message.contains("uplink") - || error.message.contains("switch") - || error.message.contains("unicast") - ); - - // Both fields together should also fail - let mut params = IpPoolCreate::new( - IdentityMetadataCreateParams { - name: "invalid-unicast3".parse().unwrap(), - description: "Unicast pool with both invalid fields".to_string(), - }, - IpVersion::V4, - ); - params.mvlan = VlanID::new(200).ok(); - params.switch_port_uplinks = Some(vec!["switch0.qsfp0".parse().unwrap()]); - - let error = object_create_error( - client, - "/v1/system/ip-pools", - ¶ms, - StatusCode::BAD_REQUEST, - ) - .await; - assert!( - error.message.contains("unicast") - || error.message.contains("mvlan") - || error.message.contains("uplink") - ); -} - -#[nexus_test] -async fn test_ip_pool_multicast_invalid_vlan( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Test valid VLAN range first (to ensure we understand the API) - let valid_params = IpPoolCreate::new_multicast( - 
IdentityMetadataCreateParams { - name: "valid-vlan".parse().unwrap(), - description: "Multicast pool with valid VLAN".to_string(), - }, - IpVersion::V4, - None, - VlanID::new(100).ok(), - ); - - // This should succeed - let _pool: IpPool = - object_create(client, "/v1/system/ip-pools", &valid_params).await; - - // Now test edge cases - VLAN 4094 should be valid (at the boundary) - let boundary_params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "boundary-vlan".parse().unwrap(), - description: "Multicast pool with boundary VLAN".to_string(), - }, - IpVersion::V4, - None, - VlanID::new(4094).ok(), - ); - - let _pool: IpPool = - object_create(client, "/v1/system/ip-pools", &boundary_params).await; -} - -#[nexus_test] -async fn test_ip_pool_multicast_invalid_uplinks( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Test with empty uplinks list - let params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "empty-uplinks".parse().unwrap(), - description: "Multicast pool with empty uplinks".to_string(), - }, - IpVersion::V4, - Some(vec![]), - VlanID::new(100).ok(), - ); - - // Empty list should be fine - just means no specific uplinks configured - let _pool: IpPool = - object_create(client, "/v1/system/ip-pools", ¶ms).await; - - // Test with duplicate uplinks - let params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "duplicate-uplinks".parse().unwrap(), - description: "Multicast pool with duplicate uplinks".to_string(), - }, - IpVersion::V4, - Some(vec![ - "switch0.qsfp0".parse().unwrap(), - "switch0.qsfp0".parse().unwrap(), // Duplicate - should be automatically removed - ]), - VlanID::new(200).ok(), - ); - - // Duplicates should be automatically removed by the deserializer - let _pool: IpPool = - object_create(client, "/v1/system/ip-pools", ¶ms).await; - let uplinks = _pool.switch_port_uplinks.as_ref().unwrap(); - assert_eq!(uplinks.len(), 1); // 
Duplicate should be removed, only one entry - - // Test with non-existent switch port - let params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "invalid-switch-port".parse().unwrap(), - description: "Multicast pool with invalid switch port".to_string(), - }, - IpVersion::V4, - Some(vec!["switch1.qsfp0".parse().unwrap()]), // switch1 doesn't exist - VlanID::new(300).ok(), - ); - - // Should fail with 400 error about switch port not found - let error = object_create_error( - client, - "/v1/system/ip-pools", - ¶ms, - StatusCode::BAD_REQUEST, - ) - .await; - assert!( - error.message.contains("switch1.qsfp0") - && error.message.contains("not found") - ); -} - -/// Test ASM/SSM multicast pool validation - ensure pools cannot mix ASM and SSM ranges -#[nexus_test] -async fn test_multicast_pool_asm_ssm_validation( - cptestctx: &ControlPlaneTestContext, -) { - let client = &cptestctx.external_client; - - // Create pure ASM multicast pool - let asm_pool_params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "asm-pool".parse().unwrap(), - description: "Pure ASM multicast pool".to_string(), - }, - IpVersion::V4, - Some(vec!["switch0.qsfp0".parse().unwrap()]), - VlanID::new(100).ok(), - ); - let asm_pool: IpPool = - object_create(client, "/v1/system/ip-pools", &asm_pool_params).await; - - // Add ASM range (224.x.x.x) - should succeed - let asm_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(224, 1, 0, 1), - std::net::Ipv4Addr::new(224, 1, 0, 50), - ) - .unwrap(), - ); - let add_asm_url = - format!("/v1/system/ip-pools/{}/ranges/add", asm_pool.identity.name); - object_create::(client, &add_asm_url, &asm_range) - .await; - - // Try to add SSM range (232.x.x.x) to ASM pool - should fail - let ssm_range = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(232, 1, 0, 1), - std::net::Ipv4Addr::new(232, 1, 0, 50), - ) - .unwrap(), - ); - let error = object_create_error( - client, - &add_asm_url, - &ssm_range, - 
StatusCode::BAD_REQUEST, - ) - .await; - assert!( - error.message.contains("Cannot mix") - && error.message.contains("ASM") - && error.message.contains("SSM"), - "Expected ASM/SSM mixing error, got: {}", - error.message - ); - - // Create pure SSM multicast pool - let ssm_pool_params = IpPoolCreate::new_multicast( - IdentityMetadataCreateParams { - name: "ssm-pool".parse().unwrap(), - description: "Pure SSM multicast pool".to_string(), - }, - IpVersion::V4, - Some(vec!["switch0.qsfp0".parse().unwrap()]), - VlanID::new(200).ok(), - ); - let ssm_pool: IpPool = - object_create(client, "/v1/system/ip-pools", &ssm_pool_params).await; - - // Add SSM range (232.x.x.x) - should succeed - let add_ssm_url = - format!("/v1/system/ip-pools/{}/ranges/add", ssm_pool.identity.name); - object_create::(client, &add_ssm_url, &ssm_range) - .await; - - // Try to add ASM range (224.x.x.x) to SSM pool - should fail - let error = object_create_error( - client, - &add_ssm_url, - &asm_range, - StatusCode::BAD_REQUEST, - ) - .await; - assert!( - error.message.contains("Cannot mix") - && error.message.contains("ASM") - && error.message.contains("SSM"), - "Expected ASM/SSM mixing error, got: {}", - error.message - ); - - // Note: IPv6 multicast ranges are not yet supported in the system, - // so we focus on IPv4 validation for now - - // Verify that multiple ranges of the same type can be added - let asm_range2 = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(224, 2, 0, 1), - std::net::Ipv4Addr::new(224, 2, 0, 50), - ) - .unwrap(), - ); - object_create::(client, &add_asm_url, &asm_range2) - .await; - - let ssm_range2 = IpRange::V4( - Ipv4Range::new( - std::net::Ipv4Addr::new(232, 2, 0, 1), - std::net::Ipv4Addr::new(232, 2, 0, 50), - ) - .unwrap(), - ); - object_create::(client, &add_ssm_url, &ssm_range2) - .await; + assert_eq!(pool.pool_type, IpPoolType::Unicast); } diff --git a/nexus/types/src/external_api/deserializers.rs b/nexus/types/src/external_api/deserializers.rs deleted 
file mode 100644 index cc802613f70..00000000000 --- a/nexus/types/src/external_api/deserializers.rs +++ /dev/null @@ -1,112 +0,0 @@ -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this -// file, You can obtain one at https://mozilla.org/MPL/2.0/. - -//! Deserializer utilities for API parameter types - -use std::fmt; - -use serde::{ - Deserializer, - de::{self, Visitor}, -}; - -use crate::external_api::params::SwitchPortUplink; - -/// Deserializes an optional `Vec` into `Vec` with deduplication. -/// -/// This deserializer handles both string and object formats: -/// - String format: "switch0.qsfp0" (from real API calls) -/// - Object format: {"switch_location": "switch0", "port_name": "qsfp0"} (from test serialization) -/// -/// Duplicates are automatically removed based on the string representation. -pub fn parse_and_dedup_switch_port_uplinks<'de, D>( - deserializer: D, -) -> Result>, D::Error> -where - D: Deserializer<'de>, -{ - struct SwitchPortUplinksVisitor; - - impl<'de> Visitor<'de> for SwitchPortUplinksVisitor { - type Value = Option>; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("an optional array of switch port uplinks") - } - - fn visit_none(self) -> Result - where - E: de::Error, - { - Ok(None) - } - - fn visit_unit(self) -> Result - where - E: de::Error, - { - Ok(None) - } - - fn visit_some(self, deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let vec = - deserializer.deserialize_seq(SwitchPortUplinksSeqVisitor)?; - Ok(Some(vec)) - } - } - - struct SwitchPortUplinksSeqVisitor; - - impl<'de> Visitor<'de> for SwitchPortUplinksSeqVisitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("an array of switch port uplinks") - } - - fn visit_seq(self, mut seq: A) -> Result - where - A: de::SeqAccess<'de>, - { - let mut seen = 
std::collections::HashSet::new(); - let mut result = Vec::new(); - - while let Some(item) = seq.next_element::()? { - let uplink = match item { - // Handle string format: "switch0.qsfp0" - serde_json::Value::String(s) => { - if !seen.insert(s.clone()) { - continue; // Skip duplicate - } - s.parse::() - .map_err(|e| de::Error::custom(e))? - } - // Handle object format: {"switch_location": "switch0", "port_name": "qsfp0"} - serde_json::Value::Object(_) => { - let uplink: SwitchPortUplink = - serde_json::from_value(item) - .map_err(|e| de::Error::custom(e))?; - let uplink_str = uplink.to_string(); - if !seen.insert(uplink_str) { - continue; // Skip duplicate - } - uplink - } - _ => { - return Err(de::Error::custom( - "expected string or object", - )); - } - }; - result.push(uplink); - } - Ok(result) - } - } - - deserializer.deserialize_option(SwitchPortUplinksVisitor) -} diff --git a/nexus/types/src/external_api/mod.rs b/nexus/types/src/external_api/mod.rs index d2943fb157c..363ddd3f41d 100644 --- a/nexus/types/src/external_api/mod.rs +++ b/nexus/types/src/external_api/mod.rs @@ -2,7 +2,6 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
-mod deserializers; pub mod headers; pub mod params; pub mod shared; diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 5b4ee6ecf26..0137cf1af9a 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -17,7 +17,6 @@ use omicron_common::api::external::{ Nullable, PaginationOrder, RouteDestination, RouteTarget, UserId, }; use omicron_common::disk::DiskVariant; -use omicron_common::vlan::VlanID; use omicron_uuid_kinds::*; use oxnet::{IpNet, Ipv4Net, Ipv6Net}; use parse_display::Display; @@ -1011,22 +1010,6 @@ pub struct IpPoolCreate { /// Type of IP pool (defaults to Unicast for backward compatibility) #[serde(default)] pub pool_type: shared::IpPoolType, - /// Rack switch uplinks that carry multicast traffic out of the rack to - /// external groups. Only applies to multicast pools; ignored for unicast - /// pools. - /// - /// Format: list of `.` strings (for example, `switch0.qsfp0`), - /// or objects with `switch_location` and `port_name`. - #[serde( - default, - skip_serializing_if = "Option::is_none", - deserialize_with = "crate::external_api::deserializers::parse_and_dedup_switch_port_uplinks" - )] - pub switch_port_uplinks: Option>, - /// VLAN ID for multicast pools. - /// Only applies to multicast pools, ignored for unicast pools. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub mvlan: Option, } impl IpPoolCreate { @@ -1039,8 +1022,6 @@ impl IpPoolCreate { identity, ip_version, pool_type: shared::IpPoolType::Unicast, - switch_port_uplinks: None, - mvlan: None, } } @@ -1048,15 +1029,11 @@ impl IpPoolCreate { pub fn new_multicast( identity: IdentityMetadataCreateParams, ip_version: IpVersion, - switch_port_uplinks: Option>, - mvlan: Option, ) -> Self { Self { identity, ip_version, pool_type: shared::IpPoolType::Multicast, - switch_port_uplinks, - mvlan, } } } @@ -1066,22 +1043,6 @@ impl IpPoolCreate { pub struct IpPoolUpdate { #[serde(flatten)] pub identity: IdentityMetadataUpdateParams, - /// Rack switch uplinks that carry multicast traffic out of the rack to - /// external groups. Only applies to multicast pools; ignored for unicast - /// pools. - /// - /// Format: list of `.` strings (for example, `switch0.qsfp0`), - /// or objects with `switch_location` and `port_name`. - #[serde( - default, - skip_serializing_if = "Option::is_none", - deserialize_with = "crate::external_api::deserializers::parse_and_dedup_switch_port_uplinks" - )] - pub switch_port_uplinks: Option>, - /// VLAN ID for multicast pools. - /// Only applies to multicast pools, ignored for unicast pools. - #[serde(skip_serializing_if = "Option::is_none")] - pub mvlan: Option, } #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] @@ -2320,45 +2281,6 @@ pub struct SwitchPortPageSelector { pub switch_port_id: Option, } -/// Switch port uplink specification for multicast IP pools. -/// Combines switch location and port name in "switchN.portM" format. 
-#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] -pub struct SwitchPortUplink { - /// Switch location (e.g., "switch0") - pub switch_location: Name, - /// Port name (e.g., "qsfp0") - pub port_name: Name, -} - -impl std::fmt::Display for SwitchPortUplink { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}.{}", self.switch_location, self.port_name) - } -} - -impl FromStr for SwitchPortUplink { - type Err = String; - - fn from_str(s: &str) -> Result { - let parts: Vec<&str> = s.split('.').collect(); - if parts.len() != 2 { - return Err(format!( - "Invalid switch port format '{}'. Expected '.'", - s - )); - } - - let switch_location = parts[0].parse::().map_err(|e| { - format!("Invalid switch location '{}': {}", parts[0], e) - })?; - let port_name = parts[1] - .parse::() - .map_err(|e| format!("Invalid port name '{}': {}", parts[1], e))?; - - Ok(Self { switch_location, port_name }) - } -} - /// Parameters for applying settings to switch ports. #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] pub struct SwitchPortApplySettings { diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index d571fcb1b0f..cefe7d1e1b3 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -396,14 +396,6 @@ pub struct IpPool { pub ip_version: IpVersion, /// Type of IP pool (unicast or multicast) pub pool_type: shared::IpPoolType, - /// Switch port uplinks for multicast pools (format: "switchN.portM") - /// Only present for multicast pools. - #[serde(skip_serializing_if = "Option::is_none")] - pub switch_port_uplinks: Option>, - /// MVLAN ID for multicast pools - /// Only present for multicast pools. - #[serde(skip_serializing_if = "Option::is_none")] - pub mvlan: Option, } /// The utilization of IP addresses in a pool. 
diff --git a/openapi/nexus.json b/openapi/nexus.json index 2ad3f2b6dfd..6f86b5d0c4b 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -21490,13 +21490,6 @@ } ] }, - "mvlan": { - "nullable": true, - "description": "MVLAN ID for multicast pools Only present for multicast pools.", - "type": "integer", - "format": "uint16", - "minimum": 0 - }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -21513,14 +21506,6 @@ } ] }, - "switch_port_uplinks": { - "nullable": true, - "description": "Switch port uplinks for multicast pools (format: \"switchN.portM\") Only present for multicast pools.", - "type": "array", - "items": { - "type": "string" - } - }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -21558,15 +21543,6 @@ } ] }, - "mvlan": { - "nullable": true, - "description": "VLAN ID for multicast pools. Only applies to multicast pools, ignored for unicast pools.", - "allOf": [ - { - "$ref": "#/components/schemas/VlanId" - } - ] - }, "name": { "$ref": "#/components/schemas/Name" }, @@ -21578,14 +21554,6 @@ "$ref": "#/components/schemas/IpPoolType" } ] - }, - "switch_port_uplinks": { - "nullable": true, - "description": "Rack switch uplinks that carry multicast traffic out of the rack to external groups. Only applies to multicast pools; ignored for unicast pools.\n\nFormat: list of `.` strings (for example, `switch0.qsfp0`), or objects with `switch_location` and `port_name`.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchPortUplink" - } } }, "required": [ @@ -21760,15 +21728,6 @@ "nullable": true, "type": "string" }, - "mvlan": { - "nullable": true, - "description": "VLAN ID for multicast pools. 
Only applies to multicast pools, ignored for unicast pools.", - "allOf": [ - { - "$ref": "#/components/schemas/VlanId" - } - ] - }, "name": { "nullable": true, "allOf": [ @@ -21776,14 +21735,6 @@ "$ref": "#/components/schemas/Name" } ] - }, - "switch_port_uplinks": { - "nullable": true, - "description": "Rack switch uplinks that carry multicast traffic out of the rack to external groups. Only applies to multicast pools; ignored for unicast pools.\n\nFormat: list of `.` strings (for example, `switch0.qsfp0`), or objects with `switch_location` and `port_name`.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchPortUplink" - } } } }, @@ -25983,32 +25934,6 @@ "items" ] }, - "SwitchPortUplink": { - "description": "Switch port uplink specification for multicast IP pools. Combines switch location and port name in \"switchN.portM\" format.", - "type": "object", - "properties": { - "port_name": { - "description": "Port name (e.g., \"qsfp0\")", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] - }, - "switch_location": { - "description": "Switch location (e.g., \"switch0\")", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] - } - }, - "required": [ - "port_name", - "switch_location" - ] - }, "SwitchResultsPage": { "description": "A single page of results", "type": "object", @@ -27052,12 +26977,6 @@ "storage" ] }, - "VlanId": { - "description": "Wrapper around a VLAN ID, ensuring it is valid.", - "type": "integer", - "format": "uint16", - "minimum": 0 - }, "Vni": { "description": "A Geneve Virtual Network Identifier", "type": "integer", diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index bc24f9933d4..6fc94ca29ee 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -2189,16 +2189,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool ( ip_version omicron.public.ip_version NOT NULL, /* Pool type for unicast (default) vs multicast pools. 
*/ - pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast', - - /* Rack switch uplinks that carry multicast traffic out of the rack to */ - /* external groups. Only applies to multicast pools (operator-configured). */ - /* Stored as switch port UUIDs. NULL for unicast pools. */ - switch_port_uplinks UUID[], - - /* MVLAN ID for multicast pools. */ - /* Only applies to multicast pools, NULL for unicast pools. */ - mvlan INT4 + pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast' ); /* diff --git a/schema/crdb/multicast-pool-support/up01.sql b/schema/crdb/multicast-pool-support/up01.sql index c6ea0f0b830..fccfcd2081f 100644 --- a/schema/crdb/multicast-pool-support/up01.sql +++ b/schema/crdb/multicast-pool-support/up01.sql @@ -11,18 +11,6 @@ CREATE TYPE IF NOT EXISTS omicron.public.ip_pool_type AS ENUM ( ALTER TABLE omicron.public.ip_pool ADD COLUMN IF NOT EXISTS pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast'; --- Add switch port uplinks for multicast pools (array of switch port UUIDs) --- Only applies to multicast pools for static (operator) configuration --- Always NULL for unicast pools -ALTER TABLE omicron.public.ip_pool - ADD COLUMN IF NOT EXISTS switch_port_uplinks UUID[]; - --- Add MVLAN ID for multicast pools --- Only applies to multicast pools for static (operator) configuration --- Always NULL for unicast pools -ALTER TABLE omicron.public.ip_pool - ADD COLUMN IF NOT EXISTS mvlan INT4; - -- Add index on pool_type for efficient filtering CREATE INDEX IF NOT EXISTS lookup_ip_pool_by_type ON omicron.public.ip_pool ( pool_type From 44330668be81be5b3ba10a42b316702486ad73c3 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Thu, 9 Oct 2025 15:39:07 +0000 Subject: [PATCH 3/7] [fmt] fixes --- nexus/db-queries/src/db/datastore/ip_pool.rs | 10 ++-------- nexus/types/src/external_api/params.rs | 12 ++---------- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs 
b/nexus/db-queries/src/db/datastore/ip_pool.rs index 7b8e421ed5d..b01d115636b 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -2301,10 +2301,7 @@ mod test { let ipv4_pool = datastore .ip_pool_create( &opctx, - IpPool::new_multicast( - &ipv4_identity, - IpVersion::V4, - ), + IpPool::new_multicast(&ipv4_identity, IpVersion::V4), ) .await .expect("Failed to create IPv4 multicast IP pool"); @@ -2341,10 +2338,7 @@ mod test { let ipv6_pool = datastore .ip_pool_create( &opctx, - IpPool::new_multicast( - &ipv6_identity, - IpVersion::V6, - ), + IpPool::new_multicast(&ipv6_identity, IpVersion::V6), ) .await .expect("Failed to create IPv6 multicast IP pool"); diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 0137cf1af9a..3553dcfdf3a 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -1018,11 +1018,7 @@ impl IpPoolCreate { identity: IdentityMetadataCreateParams, ip_version: IpVersion, ) -> Self { - Self { - identity, - ip_version, - pool_type: shared::IpPoolType::Unicast, - } + Self { identity, ip_version, pool_type: shared::IpPoolType::Unicast } } /// Create parameters for a multicast IP pool @@ -1030,11 +1026,7 @@ impl IpPoolCreate { identity: IdentityMetadataCreateParams, ip_version: IpVersion, ) -> Self { - Self { - identity, - ip_version, - pool_type: shared::IpPoolType::Multicast, - } + Self { identity, ip_version, pool_type: shared::IpPoolType::Multicast } } } From f7991c39d088f9249836d551d7d65fd60e072659 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Sun, 12 Oct 2025 06:11:48 +0000 Subject: [PATCH 4/7] [review] comments, validation, & cleanup --- common/src/address.rs | 24 +++++- nexus/db-queries/src/db/datastore/ip_pool.rs | 22 ++--- .../src/db/datastore/switch_port.rs | 40 --------- nexus/external-api/src/lib.rs | 13 ++- nexus/src/app/ip_pool.rs | 83 +++++++++++++++++-- 
nexus/types/src/external_api/params.rs | 11 ++- nexus/types/src/external_api/shared.rs | 8 +- 7 files changed, 127 insertions(+), 74 deletions(-) diff --git a/common/src/address.rs b/common/src/address.rs index 84e69c15af1..0a5d63ff5ba 100644 --- a/common/src/address.rs +++ b/common/src/address.rs @@ -26,13 +26,31 @@ pub const SLED_PREFIX: u8 = 64; /// IPv4 Source-Specific Multicast (SSM) subnet as defined in RFC 4607: /// . +/// +/// RFC 4607 Section 3 allocates 232.0.0.0/8 as the IPv4 SSM address range. +/// This is a single contiguous block, unlike IPv6 which has per-scope ranges. pub const IPV4_SSM_SUBNET: oxnet::Ipv4Net = oxnet::Ipv4Net::new_unchecked(Ipv4Addr::new(232, 0, 0, 0), 8); -/// IPv6 Source-Specific Multicast (SSM) flag field value as defined in RFC 4607: +/// IPv6 Source-Specific Multicast (SSM) subnet as defined in RFC 4607: /// . -/// This is the flags nibble (high nibble of second byte) for FF3x::/32 addresses. -pub const IPV6_SSM_FLAG_FIELD: u8 = 3; +/// +/// RFC 4607 Section 3 specifies "FF3x::/32 for each scope x" - meaning one +/// /32 block per scope (FF30::/32, FF31::/32, ..., FF3F::/32). +/// +/// We use /12 as an implementation convenience to match all these blocks with +/// a single subnet. This works because all SSM addresses share the same first +/// 12 bits: +/// - Bits 0-7: 11111111 (0xFF, multicast prefix) +/// - Bits 8-11: 0011 (flag field = 3, indicating SSM) +/// - Bits 12-15: xxxx (scope field, any value 0-F) +/// +/// Thus FF30::/12 efficiently matches FF30:: through FF3F:FFFF:...:FFFF, +/// covering all SSM scopes. 
+pub const IPV6_SSM_SUBNET: oxnet::Ipv6Net = oxnet::Ipv6Net::new_unchecked( + Ipv6Addr::new(0xff30, 0, 0, 0, 0, 0, 0, 0), + 12, +); /// maximum possible value for a tcp or udp port pub const MAX_PORT: u16 = u16::MAX; diff --git a/nexus/db-queries/src/db/datastore/ip_pool.rs b/nexus/db-queries/src/db/datastore/ip_pool.rs index b01d115636b..e1e8d17adf3 100644 --- a/nexus/db-queries/src/db/datastore/ip_pool.rs +++ b/nexus/db-queries/src/db/datastore/ip_pool.rs @@ -45,7 +45,7 @@ use nexus_db_model::IpVersion; use nexus_db_model::Project; use nexus_db_model::Vpc; use nexus_types::external_api::shared::IpRange; -use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_FLAG_FIELD}; +use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_SUBNET}; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; @@ -1458,9 +1458,7 @@ impl DataStore { } IpRange::V6(v6_range) => { let first = v6_range.first_address(); - // Check if the flag field (second nibble) is 3 for SSM - let flag_field = (first.octets()[1] & 0xF0) >> 4; - flag_field == IPV6_SSM_FLAG_FIELD + IPV6_SSM_SUBNET.contains(first) } }; @@ -1481,11 +1479,7 @@ impl DataStore { for existing_range in &existing_ranges { let existing_is_ssm = match &existing_range.first_address { IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()), - IpNetwork::V6(net) => { - // Check if the flag field (second nibble) is 3 for SSM - let flag_field = (net.network().octets()[1] & 0xF0) >> 4; - flag_field == IPV6_SSM_FLAG_FIELD - } + IpNetwork::V6(net) => IPV6_SSM_SUBNET.contains(net.network()), }; // If we have a mix of ASM and SSM within this pool, reject @@ -1493,9 +1487,7 @@ impl DataStore { let new_type = if new_range_is_ssm { "SSM" } else { "ASM" }; let existing_type = if existing_is_ssm { "SSM" } else { "ASM" }; return Err(Error::invalid_request(&format!( - "Cannot mix {new_type} and {existing_type} ranges in multicast pool. 
\ - {new_type} ranges (IPv4 232/8, IPv6 FF3x::/32) and \ - {existing_type} ranges (IPv4 224/4, IPv6 FF0x-FF2x::/32) must be in separate pools." + "Cannot mix {new_type} and {existing_type} ranges in the same multicast pool" ))); } } @@ -1534,11 +1526,7 @@ impl DataStore { let is_ssm = match range.first_address { IpNetwork::V4(net) => IPV4_SSM_SUBNET.contains(net.network()), - IpNetwork::V6(net) => { - // Check if the flag field (second nibble) is 3 for SSM - let flags = (net.network().octets()[1] & 0xF0) >> 4; - flags == IPV6_SSM_FLAG_FIELD - } + IpNetwork::V6(net) => IPV6_SSM_SUBNET.contains(net.network()), }; Ok(is_ssm) diff --git a/nexus/db-queries/src/db/datastore/switch_port.rs b/nexus/db-queries/src/db/datastore/switch_port.rs index 9a756916e6b..c4093806eda 100644 --- a/nexus/db-queries/src/db/datastore/switch_port.rs +++ b/nexus/db-queries/src/db/datastore/switch_port.rs @@ -1138,46 +1138,6 @@ impl DataStore { Ok(id) } - /// Given a list of switch port UUIDs, return a list of strings in the - /// format ".". The order of the returned list - /// matches the order of the input UUIDs. 
- pub async fn switch_ports_from_ids( - &self, - opctx: &OpContext, - uplink_uuids: &[Uuid], - ) -> LookupResult> { - use nexus_db_schema::schema::switch_port::{ - self, dsl, port_name, switch_location, - }; - - if uplink_uuids.is_empty() { - return Ok(Vec::new()); - } - - let conn = self.pool_connection_authorized(opctx).await?; - let uplink_uuids_vec: Vec = uplink_uuids.to_vec(); - - // Maintain the order from the input UUIDs - let mut result = Vec::with_capacity(uplink_uuids.len()); - for uuid in uplink_uuids_vec.iter() { - let switch_port_info = dsl::switch_port - .filter(switch_port::id.eq(*uuid)) - .select((switch_location, port_name)) - .first_async::<(String, String)>(&*conn) - .await - .map_err(|_| { - Error::internal_error(&format!( - "Switch port UUID {uuid} not found", - )) - })?; - - result - .push(format!("{}.{}", switch_port_info.0, switch_port_info.1)); - } - - Ok(result) - } - pub async fn switch_ports_with_uplinks( &self, opctx: &OpContext, diff --git a/nexus/external-api/src/lib.rs b/nexus/external-api/src/lib.rs index 7bf518397e6..f3ab12cd061 100644 --- a/nexus/external-api/src/lib.rs +++ b/nexus/external-api/src/lib.rs @@ -928,6 +928,8 @@ pub trait NexusExternalApi { ) -> Result>, HttpError>; /// Create IP pool + /// + /// IPv6 is not yet supported for unicast pools. #[endpoint { method = POST, path = "/v1/system/ip-pools", @@ -1076,9 +1078,16 @@ pub trait NexusExternalApi { query_params: Query, ) -> Result>, HttpError>; - /// Add range to IP pool + /// Add range to IP pool. /// - /// IPv6 ranges are not allowed yet. + /// IPv6 ranges are not allowed yet for unicast pools. + /// + /// For multicast pools, all ranges must be either Any-Source Multicast (ASM) + /// or Source-Specific Multicast (SSM), but not both. Mixing ASM and SSM + /// ranges in the same pool is not allowed. 
+ /// + /// ASM: IPv4 addresses outside 232.0.0.0/8, IPv6 addresses with flag field != 3 + /// SSM: IPv4 addresses in 232.0.0.0/8, IPv6 addresses with flag field = 3 #[endpoint { method = POST, path = "/v1/system/ip-pools/{pool}/ranges/add", diff --git a/nexus/src/app/ip_pool.rs b/nexus/src/app/ip_pool.rs index 24c70c89fdf..b98e938563a 100644 --- a/nexus/src/app/ip_pool.rs +++ b/nexus/src/app/ip_pool.rs @@ -19,6 +19,7 @@ use nexus_db_queries::context::OpContext; use nexus_db_queries::db; use nexus_db_queries::db::model::Name; use nexus_types::identity::Resource; +use omicron_common::address::{IPV4_SSM_SUBNET, IPV6_SSM_SUBNET}; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DataPageParams; use omicron_common::api::external::DeleteResult; @@ -78,6 +79,15 @@ impl super::Nexus { // https://github.com/oxidecomputer/omicron/issues/8881 let ip_version = pool_params.ip_version.into(); + // IPv6 is not yet supported for unicast pools + if matches!(pool_params.pool_type, shared::IpPoolType::Unicast) + && matches!(ip_version, IpVersion::V6) + { + return Err(Error::invalid_request( + "IPv6 pools are not yet supported for unicast pools", + )); + } + let pool = match pool_params.pool_type.clone() { shared::IpPoolType::Unicast => { IpPool::new(&pool_params.identity, ip_version) @@ -339,16 +349,33 @@ impl super::Nexus { )); } + // Validate uniformity: ensure range doesn't span multicast/unicast boundary let range_is_multicast = match range { shared::IpRange::V4(v4_range) => { let first = v4_range.first_address(); let last = v4_range.last_address(); - first.is_multicast() && last.is_multicast() + let first_is_multicast = first.is_multicast(); + let last_is_multicast = last.is_multicast(); + + if first_is_multicast != last_is_multicast { + return Err(Error::invalid_request( + "IP range cannot span multicast and unicast address spaces", + )); + } + first_is_multicast } shared::IpRange::V6(v6_range) => { let first = v6_range.first_address(); let 
last = v6_range.last_address(); - first.is_multicast() && last.is_multicast() + let first_is_multicast = first.is_multicast(); + let last_is_multicast = last.is_multicast(); + + if first_is_multicast != last_is_multicast { + return Err(Error::invalid_request( + "IP range cannot span multicast and unicast address spaces", + )); + } + first_is_multicast } }; @@ -360,8 +387,34 @@ impl super::Nexus { )); } - // For multicast pools, validate ASM/SSM separation - // This validation is done in the datastore layer + // For multicast pools, validate that the range doesn't span + // ASM/SSM boundaries + match range { + shared::IpRange::V4(v4_range) => { + let first = v4_range.first_address(); + let last = v4_range.last_address(); + let first_is_ssm = IPV4_SSM_SUBNET.contains(first); + let last_is_ssm = IPV4_SSM_SUBNET.contains(last); + + if first_is_ssm != last_is_ssm { + return Err(Error::invalid_request( + "IP range cannot span ASM and SSM address spaces", + )); + } + } + shared::IpRange::V6(v6_range) => { + let first = v6_range.first_address(); + let last = v6_range.last_address(); + let first_is_ssm = IPV6_SSM_SUBNET.contains(first); + let last_is_ssm = IPV6_SSM_SUBNET.contains(last); + + if first_is_ssm != last_is_ssm { + return Err(Error::invalid_request( + "IP range cannot span ASM and SSM address spaces", + )); + } + } + } } IpPoolType::Unicast => { if range_is_multicast { @@ -454,17 +507,33 @@ impl super::Nexus { )); } - // Validate that the range matches the pool type + // Validate that the range matches the pool type and that they match uniformity let range_is_multicast = match range { shared::IpRange::V4(v4_range) => { let first = v4_range.first_address(); let last = v4_range.last_address(); - first.is_multicast() && last.is_multicast() + let first_is_multicast = first.is_multicast(); + let last_is_multicast = last.is_multicast(); + + if first_is_multicast != last_is_multicast { + return Err(Error::invalid_request( + "IP range cannot span multicast and unicast 
address spaces", + )); + } + first_is_multicast } shared::IpRange::V6(v6_range) => { let first = v6_range.first_address(); let last = v6_range.last_address(); - first.is_multicast() && last.is_multicast() + let first_is_multicast = first.is_multicast(); + let last_is_multicast = last.is_multicast(); + + if first_is_multicast != last_is_multicast { + return Err(Error::invalid_request( + "IP range cannot span multicast and unicast address spaces", + )); + } + first_is_multicast } }; diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 3553dcfdf3a..378c28f7592 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -997,7 +997,14 @@ impl std::fmt::Debug for CertificateCreate { // IP POOLS -/// Create-time parameters for an `IpPool` +/// Create-time parameters for an `IpPool`. +/// +/// For multicast pools, all ranges must be either Any-Source Multicast (ASM) +/// or Source-Specific Multicast (SSM), but not both. Mixing ASM and SSM +/// ranges in the same pool is not allowed. +/// +/// ASM: IPv4 addresses outside 232.0.0.0/8, IPv6 addresses with flag field != 3 +/// SSM: IPv4 addresses in 232.0.0.0/8, IPv6 addresses with flag field = 3 #[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] pub struct IpPoolCreate { #[serde(flatten)] @@ -1007,7 +1014,7 @@ pub struct IpPoolCreate { /// The default is IPv4. #[serde(default = "IpVersion::v4")] pub ip_version: IpVersion, - /// Type of IP pool (defaults to Unicast for backward compatibility) + /// Type of IP pool (defaults to Unicast) #[serde(default)] pub pool_type: shared::IpPoolType, } diff --git a/nexus/types/src/external_api/shared.rs b/nexus/types/src/external_api/shared.rs index f357d0eaf46..051bd3fe7dc 100644 --- a/nexus/types/src/external_api/shared.rs +++ b/nexus/types/src/external_api/shared.rs @@ -743,13 +743,15 @@ impl RelayState { } } -/// Type of IP pool +/// Type of IP pool. 
#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] #[serde(rename_all = "snake_case")] pub enum IpPoolType { - /// Unicast IP pool for standard IP allocations + /// Unicast IP pool for standard IP allocations. Unicast, - /// Multicast IP pool for multicast group allocations + /// Multicast IP pool for multicast group allocations. + /// + /// All ranges in a multicast pool must be either ASM or SSM (not mixed). Multicast, } From 1b249365f54f4c828f17c18bd1e0ecda95cba87a Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Sun, 12 Oct 2025 09:22:24 +0000 Subject: [PATCH 5/7] [api] run generate post-API changes --- openapi/nexus.json | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/openapi/nexus.json b/openapi/nexus.json index 6f86b5d0c4b..9ff149c54c3 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -8304,6 +8304,7 @@ "system/ip-pools" ], "summary": "Create IP pool", + "description": "IPv6 is not yet supported for unicast pools.", "operationId": "ip_pool_create", "requestBody": { "content": { @@ -8515,8 +8516,8 @@ "tags": [ "system/ip-pools" ], - "summary": "Add range to IP pool", - "description": "IPv6 ranges are not allowed yet.", + "summary": "Add range to IP pool.", + "description": "IPv6 ranges are not allowed yet for unicast pools.\n\nFor multicast pools, all ranges must be either Any-Source Multicast (ASM) or Source-Specific Multicast (SSM), but not both. 
Mixing ASM and SSM ranges in the same pool is not allowed.\n\nASM: IPv4 addresses outside 232.0.0.0/8, IPv6 addresses with flag field != 3 SSM: IPv4 addresses in 232.0.0.0/8, IPv6 addresses with flag field = 3", "operationId": "ip_pool_range_add", "parameters": [ { @@ -21528,7 +21529,7 @@ ] }, "IpPoolCreate": { - "description": "Create-time parameters for an `IpPool`", + "description": "Create-time parameters for an `IpPool`.\n\nFor multicast pools, all ranges must be either Any-Source Multicast (ASM) or Source-Specific Multicast (SSM), but not both. Mixing ASM and SSM ranges in the same pool is not allowed.\n\nASM: IPv4 addresses outside 232.0.0.0/8, IPv6 addresses with flag field != 3 SSM: IPv4 addresses in 232.0.0.0/8, IPv6 addresses with flag field = 3", "type": "object", "properties": { "description": { @@ -21547,7 +21548,7 @@ "$ref": "#/components/schemas/Name" }, "pool_type": { - "description": "Type of IP pool (defaults to Unicast for backward compatibility)", + "description": "Type of IP pool (defaults to Unicast)", "default": "unicast", "allOf": [ { @@ -21702,17 +21703,17 @@ ] }, "IpPoolType": { - "description": "Type of IP pool", + "description": "Type of IP pool.", "oneOf": [ { - "description": "Unicast IP pool for standard IP allocations", + "description": "Unicast IP pool for standard IP allocations.", "type": "string", "enum": [ "unicast" ] }, { - "description": "Multicast IP pool for multicast group allocations", + "description": "Multicast IP pool for multicast group allocations.\n\nAll ranges in a multicast pool must be either ASM or SSM (not mixed).", "type": "string", "enum": [ "multicast" From 666446ff203b4cda69c44fc7a4dc0a437fec67c7 Mon Sep 17 00:00:00 2001 From: Zeeshan Lakhani Date: Tue, 14 Oct 2025 01:44:35 +0000 Subject: [PATCH 6/7] [review] have IP range conversions use tryfrom Also, we add DB a constraint and specialize the range error. 
--- nexus/db-model/src/ip_pool.rs | 90 +++++++++++++++------ nexus/reconfigurator/preparation/src/lib.rs | 13 ++- nexus/src/external_api/http_entrypoints.rs | 12 +-- schema/crdb/dbinit.sql | 5 +- schema/crdb/multicast-pool-support/up01.sql | 6 ++ 5 files changed, 91 insertions(+), 35 deletions(-) diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs index 817206ed24d..226785e0f3c 100644 --- a/nexus/db-model/src/ip_pool.rs +++ b/nexus/db-model/src/ip_pool.rs @@ -23,6 +23,32 @@ use omicron_common::api::external; use std::net::IpAddr; use uuid::Uuid; +/// Errors that can occur when converting an IP pool range from the database +/// to the API representation. +#[derive(Debug, Clone)] +pub enum IpRangeConversionError { + /// The first and last addresses have mismatched IP versions (IPv4 vs IPv6). + MismatchedVersions { first: IpAddr, last: IpAddr }, + /// The IP range is invalid (e.g., last address is less than first address). + InvalidRange { msg: String }, +} + +impl std::fmt::Display for IpRangeConversionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::MismatchedVersions { first, last } => write!( + f, + "IP range has mismatched protocol versions: first={first}, last={last}" + ), + Self::InvalidRange { msg } => { + write!(f, "Invalid IP range: {msg}") + } + } + } +} + +impl std::error::Error for IpRangeConversionError {} + impl_enum_type!( IpVersionEnum: @@ -279,38 +305,50 @@ impl IpPoolRange { } } -impl From for views::IpPoolRange { - fn from(range: IpPoolRange) -> Self { - Self { +impl TryFrom for views::IpPoolRange { + type Error = external::Error; + + fn try_from(range: IpPoolRange) -> Result { + let ip_range = shared::IpRange::try_from(&range).map_err(|e| { + external::Error::internal_error(&format!( + "Invalid IP range in database (id={}, pool={}, first={}, last={}): {e:#}", + range.id, range.ip_pool_id, + range.first_address.ip(), range.last_address.ip() + )) + })?; + + Ok(Self { id: 
range.id, ip_pool_id: range.ip_pool_id, time_created: range.time_created, - range: shared::IpRange::from(&range), - } + range: ip_range, + }) } } -impl From<&IpPoolRange> for shared::IpRange { - fn from(range: &IpPoolRange) -> Self { - let maybe_range = - match (range.first_address.ip(), range.last_address.ip()) { - (IpAddr::V4(first), IpAddr::V4(last)) => { - shared::IpRange::try_from((first, last)) - } - (IpAddr::V6(first), IpAddr::V6(last)) => { - shared::IpRange::try_from((first, last)) - } - (first, last) => { - unreachable!( - "Expected first/last address of an IP range to \ - both be of the same protocol version, but first = {:?} \ - and last = {:?}", - first, last, - ); - } - }; - maybe_range - .expect("Retrieved an out-of-order IP range pair from the database") +impl TryFrom<&IpPoolRange> for shared::IpRange { + type Error = IpRangeConversionError; + + fn try_from(range: &IpPoolRange) -> Result { + match (range.first_address.ip(), range.last_address.ip()) { + (IpAddr::V4(first), IpAddr::V4(last)) => { + shared::IpRange::try_from((first, last)).map_err(|e| { + IpRangeConversionError::InvalidRange { + msg: format!("Invalid IPv4 range: {e:#}",), + } + }) + } + (IpAddr::V6(first), IpAddr::V6(last)) => { + shared::IpRange::try_from((first, last)).map_err(|e| { + IpRangeConversionError::InvalidRange { + msg: format!("Invalid IPv6 range: {e:#}"), + } + }) + } + (first, last) => { + Err(IpRangeConversionError::MismatchedVersions { first, last }) + } + } } } diff --git a/nexus/reconfigurator/preparation/src/lib.rs b/nexus/reconfigurator/preparation/src/lib.rs index d400bb3e9b1..61b74ee735e 100644 --- a/nexus/reconfigurator/preparation/src/lib.rs +++ b/nexus/reconfigurator/preparation/src/lib.rs @@ -257,8 +257,17 @@ impl PlanningInputFromDb<'_> { } pub fn build(&self) -> Result { - let service_ip_pool_ranges = - self.ip_pool_range_rows.iter().map(IpRange::from).collect(); + let service_ip_pool_ranges = self + .ip_pool_range_rows + .iter() + .map(|range| { + 
IpRange::try_from(range).map_err(|e| {
+                    Error::internal_error(&format!(
+                        "invalid IP pool range in database: {e:#}"
+                    ))
+                })
+            })
+            .collect::<Result<Vec<_>, _>>()?;
         let policy = Policy {
             service_ip_pool_ranges,
             target_boundary_ntp_zone_count: self.target_boundary_ntp_zone_count,
diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs
index 3275acd49e9..3e096ec4396 100644
--- a/nexus/src/external_api/http_entrypoints.rs
+++ b/nexus/src/external_api/http_entrypoints.rs
@@ -2048,8 +2048,8 @@ impl NexusExternalApi for NexusExternalApiImpl {
                 .ip_pool_list_ranges(&opctx, &pool_lookup, &pag_params)
                 .await?
                 .into_iter()
-                .map(|range| range.into())
-                .collect();
+                .map(|range| range.try_into())
+                .collect::<Result<Vec<_>, _>>()?;
             Ok(HttpResponseOk(ResultsPage::new(
                 ranges,
                 &EmptyScanParams {},
@@ -2080,7 +2080,7 @@
             let pool_lookup = nexus.ip_pool_lookup(&opctx, &path.pool)?;
             let out = nexus.ip_pool_add_range(&opctx, &pool_lookup, &range).await?;
-            Ok(HttpResponseCreated(out.into()))
+            Ok(HttpResponseCreated(out.try_into()?))
         };
         apictx
             .context
@@ -2135,8 +2135,8 @@
                 .ip_pool_service_list_ranges(&opctx, &pag_params)
                 .await?
                .into_iter()
-                .map(|range| range.into())
-                .collect();
+                .map(|range| range.try_into())
+                .collect::<Result<Vec<_>, _>>()?;
             Ok(HttpResponseOk(ResultsPage::new(
                 ranges,
                 &EmptyScanParams {},
@@ -2163,7 +2163,7 @@
             let nexus = &apictx.context.nexus;
             let range = range_params.into_inner();
             let out = nexus.ip_pool_service_add_range(&opctx, &range).await?;
-            Ok(HttpResponseCreated(out.into()))
+            Ok(HttpResponseCreated(out.try_into()?))
         };
         apictx
             .context
diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 6fc94ca29ee..cf62ea8fef4 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -2270,7 +2270,10 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool_range (
     /* FK into the `ip_pool` table. */
     ip_pool_id UUID NOT NULL,
     /* Tracks child resources, IP addresses allocated out of this range. */
-    rcgen INT8 NOT NULL
+    rcgen INT8 NOT NULL,
+
+    /* Ensure first address is not greater than last address */
+    CONSTRAINT check_address_order CHECK (first_address <= last_address)
 );

 /*
diff --git a/schema/crdb/multicast-pool-support/up01.sql b/schema/crdb/multicast-pool-support/up01.sql
index fccfcd2081f..8435680ac9a 100644
--- a/schema/crdb/multicast-pool-support/up01.sql
+++ b/schema/crdb/multicast-pool-support/up01.sql
@@ -16,3 +16,9 @@ CREATE INDEX IF NOT EXISTS lookup_ip_pool_by_type ON omicron.public.ip_pool (
     pool_type
 ) WHERE time_deleted IS NULL;
+
+-- Add CHECK constraint to ip_pool_range to ensure data integrity
+-- Ensure first address is not greater than last address
+ALTER TABLE omicron.public.ip_pool_range
+    ADD CONSTRAINT IF NOT EXISTS check_address_order
+    CHECK (first_address <= last_address);

From f52ce736d95a2919ad32644680ecad9865339534 Mon Sep 17 00:00:00 2001
From: Zeeshan Lakhani
Date: Wed, 15 Oct 2025 08:17:50 +0000
Subject: [PATCH 7/7] [fix] db ordering

---
 nexus/db-model/src/ip_pool.rs | 4 ++--
 nexus/db-schema/src/schema.rs | 2 +-
 schema/crdb/dbinit.sql        | 8 ++++----
 3 files changed, 7
insertions(+), 7 deletions(-)

diff --git a/nexus/db-model/src/ip_pool.rs b/nexus/db-model/src/ip_pool.rs
index b52692991e4..b0d331b4cd3 100644
--- a/nexus/db-model/src/ip_pool.rs
+++ b/nexus/db-model/src/ip_pool.rs
@@ -160,14 +160,14 @@ pub struct IpPool {
     pub identity: IpPoolIdentity,
     /// The IP version of the pool.
     pub ip_version: IpVersion,
-    /// Pool type for unicast (default) vs multicast pools.
-    pub pool_type: IpPoolType,
     /// Child resource generation number, for optimistic concurrency control of
     /// the contained ranges.
     pub rcgen: i64,
     /// Indicates what the pool is reserved for.
     pub reservation_type: IpPoolReservationType,
+    /// Pool type for unicast (default) vs multicast pools.
+    pub pool_type: IpPoolType,
 }

 impl IpPool {
diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs
index cb63440135d..654a11b8f3e 100644
--- a/nexus/db-schema/src/schema.rs
+++ b/nexus/db-schema/src/schema.rs
@@ -629,9 +629,9 @@ table! {
         time_modified -> Timestamptz,
         time_deleted -> Nullable<Timestamptz>,
         ip_version -> crate::enums::IpVersionEnum,
-        pool_type -> crate::enums::IpPoolTypeEnum,
         rcgen -> Int8,
         reservation_type -> crate::enums::IpPoolReservationTypeEnum,
+        pool_type -> crate::enums::IpPoolTypeEnum,
     }
 }

diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql
index 72ad161bd50..c927c560c17 100644
--- a/schema/crdb/dbinit.sql
+++ b/schema/crdb/dbinit.sql
@@ -2194,11 +2194,11 @@ CREATE TABLE IF NOT EXISTS omicron.public.ip_pool (
     /* The IP version of the ranges contained in this pool. */
     ip_version omicron.public.ip_version NOT NULL,

-    /* Pool type for unicast (default) vs multicast pools. */
-    pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast',
-
     /* Indicates what the IP Pool is reserved for. */
-    reservation_type omicron.public.ip_pool_reservation_type NOT NULL
+    reservation_type omicron.public.ip_pool_reservation_type NOT NULL,
+
+    /* Pool type for unicast (default) vs multicast pools.
*/ + pool_type omicron.public.ip_pool_type NOT NULL DEFAULT 'unicast' ); /*