From 3348dcd1a7a35683d244da56c915c916a31f7a0b Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Fri, 8 Mar 2024 16:24:31 +1300 Subject: [PATCH 01/15] Fix typo in function name --- internal/network/machine_network_cidr.go | 4 ++-- internal/provider/base.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index 1393d0f1d2..d40ff1f2cd 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -352,9 +352,9 @@ func GetSecondaryMachineCidr(cluster *common.Cluster) (string, error) { return "", errors.Errorf("unable to get secondary machine network for cluster %s", cluster.ID.String()) } -// GetMachineNetworksFromBoostrapHost used to collect machine networks from the cluster's bootstrap host. +// GetMachineNetworksFromBootstrapHost used to collect machine networks from the cluster's bootstrap host. // The function will get at most one IPv4 and one IPv6 network. 
-func GetMachineNetworksFromBoostrapHost(cluster *common.Cluster, log logrus.FieldLogger) []*models.MachineNetwork { +func GetMachineNetworksFromBootstrapHost(cluster *common.Cluster, log logrus.FieldLogger) []*models.MachineNetwork { if IsMachineCidrAvailable(cluster) { return cluster.MachineNetworks } diff --git a/internal/provider/base.go b/internal/provider/base.go index 557959d61f..d31b0f2f5d 100644 --- a/internal/provider/base.go +++ b/internal/provider/base.go @@ -47,7 +47,7 @@ func GetMachineNetworkForUserManagedNetworking(log logrus.FieldLogger, cluster * if bootstrapCidr != "" { log.Infof("Selected bootstrap machine network CIDR %s for cluster %s", bootstrapCidr, cluster.ID.String()) var machineNetwork []installcfg.MachineNetwork - cluster.MachineNetworks = network.GetMachineNetworksFromBoostrapHost(cluster, log) + cluster.MachineNetworks = network.GetMachineNetworksFromBootstrapHost(cluster, log) for _, net := range cluster.MachineNetworks { machineNetwork = append(machineNetwork, installcfg.MachineNetwork{Cidr: string(net.Cidr)}) } From def52bf8957cc3523c082d5957b4ca610dfcaced Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Fri, 8 Mar 2024 16:25:36 +1300 Subject: [PATCH 02/15] Refactor GetMachineNetworkForUserManagedNetworking() It's confusing that GetMachineNetworksFromBootstrapHost() returns the existing MachineNetworks in the cluster (and does *not* get them from the bootstrap host) if they already exist there. Refactor to make the logic clearer. 
--- internal/network/machine_network_cidr.go | 4 ---- internal/provider/base.go | 4 +++- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index d40ff1f2cd..dfc88b0cbb 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -355,10 +355,6 @@ func GetSecondaryMachineCidr(cluster *common.Cluster) (string, error) { // GetMachineNetworksFromBootstrapHost used to collect machine networks from the cluster's bootstrap host. // The function will get at most one IPv4 and one IPv6 network. func GetMachineNetworksFromBootstrapHost(cluster *common.Cluster, log logrus.FieldLogger) []*models.MachineNetwork { - if IsMachineCidrAvailable(cluster) { - return cluster.MachineNetworks - } - bootstrap := common.GetBootstrapHost(cluster) if bootstrap == nil { log.Warnf("No bootstrap found in cluster %s", cluster.ID) diff --git a/internal/provider/base.go b/internal/provider/base.go index d31b0f2f5d..69c1cbc622 100644 --- a/internal/provider/base.go +++ b/internal/provider/base.go @@ -46,8 +46,10 @@ func GetMachineNetworkForUserManagedNetworking(log logrus.FieldLogger, cluster * bootstrapCidr := network.GetPrimaryMachineCidrForUserManagedNetwork(cluster, log) if bootstrapCidr != "" { log.Infof("Selected bootstrap machine network CIDR %s for cluster %s", bootstrapCidr, cluster.ID.String()) + if !network.IsMachineCidrAvailable(cluster) { + cluster.MachineNetworks = network.GetMachineNetworksFromBootstrapHost(cluster, log) + } var machineNetwork []installcfg.MachineNetwork - cluster.MachineNetworks = network.GetMachineNetworksFromBootstrapHost(cluster, log) for _, net := range cluster.MachineNetworks { machineNetwork = append(machineNetwork, installcfg.MachineNetwork{Cidr: string(net.Cidr)}) } From b91e5d50060bea5d2703e93e20c6809cb6c5808b Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Fri, 8 Mar 2024 16:34:38 +1300 Subject: [PATCH 03/15] Use 
plural in function name --- internal/provider/base.go | 4 ++-- internal/provider/nutanix/installConfig.go | 2 +- internal/provider/vsphere/installConfig.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/provider/base.go b/internal/provider/base.go index 69c1cbc622..7957cb52d1 100644 --- a/internal/provider/base.go +++ b/internal/provider/base.go @@ -42,7 +42,7 @@ type Provider interface { PostCreateManifestsHook(cluster *common.Cluster, envVars *[]string, workDir string) error } -func GetMachineNetworkForUserManagedNetworking(log logrus.FieldLogger, cluster *common.Cluster) []installcfg.MachineNetwork { +func GetMachineNetworksForUserManagedNetworking(log logrus.FieldLogger, cluster *common.Cluster) []installcfg.MachineNetwork { bootstrapCidr := network.GetPrimaryMachineCidrForUserManagedNetwork(cluster, log) if bootstrapCidr != "" { log.Infof("Selected bootstrap machine network CIDR %s for cluster %s", bootstrapCidr, cluster.ID.String()) @@ -88,7 +88,7 @@ func replaceMachineNetworkIfNeeded(log logrus.FieldLogger, cluster *common.Clust } func ConfigureUserManagedNetworkingInInstallConfig(log logrus.FieldLogger, cluster *common.Cluster, cfg *installcfg.InstallerConfigBaremetal) { - cfg.Networking.MachineNetwork = GetMachineNetworkForUserManagedNetworking(log, cluster) + cfg.Networking.MachineNetwork = GetMachineNetworksForUserManagedNetworking(log, cluster) if cluster.NetworkType != nil { cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType) } diff --git a/internal/provider/nutanix/installConfig.go b/internal/provider/nutanix/installConfig.go index e5ac215693..45a9c68f0a 100644 --- a/internal/provider/nutanix/installConfig.go +++ b/internal/provider/nutanix/installConfig.go @@ -60,7 +60,7 @@ func (p nutanixProvider) AddPlatformToInstallConfig( nPlatform.DeprecatedIngressVIP = network.GetIngressVipById(cluster, 0) } } else { - cfg.Networking.MachineNetwork = provider.GetMachineNetworkForUserManagedNetworking(p.Log, cluster) 
+ cfg.Networking.MachineNetwork = provider.GetMachineNetworksForUserManagedNetworking(p.Log, cluster) if cluster.NetworkType != nil { cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType) } diff --git a/internal/provider/vsphere/installConfig.go b/internal/provider/vsphere/installConfig.go index f4a92737dd..830eb0e91f 100644 --- a/internal/provider/vsphere/installConfig.go +++ b/internal/provider/vsphere/installConfig.go @@ -75,7 +75,7 @@ func (p vsphereProvider) AddPlatformToInstallConfig( vsPlatform.DeprecatedIngressVIP = network.GetIngressVipById(cluster, 0) } } else { - cfg.Networking.MachineNetwork = provider.GetMachineNetworkForUserManagedNetworking(p.Log, cluster) + cfg.Networking.MachineNetwork = provider.GetMachineNetworksForUserManagedNetworking(p.Log, cluster) if cluster.NetworkType != nil { cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType) } From 484602b3b77e7519cf3ead8b1f29c137d0a34279 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Wed, 28 Feb 2024 16:26:44 +1300 Subject: [PATCH 04/15] Remove redundant code The network type is set for all platforms in getBasicInstallConfig(). There is no need to set it again in the none platform provider. 
--- internal/provider/base.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/provider/base.go b/internal/provider/base.go index 7957cb52d1..5a1be4ea7b 100644 --- a/internal/provider/base.go +++ b/internal/provider/base.go @@ -1,7 +1,6 @@ package provider import ( - "github.com/go-openapi/swag" "github.com/openshift/assisted-service/internal/common" "github.com/openshift/assisted-service/internal/host/hostutil" "github.com/openshift/assisted-service/internal/installcfg" @@ -89,9 +88,6 @@ func replaceMachineNetworkIfNeeded(log logrus.FieldLogger, cluster *common.Clust func ConfigureUserManagedNetworkingInInstallConfig(log logrus.FieldLogger, cluster *common.Cluster, cfg *installcfg.InstallerConfigBaremetal) { cfg.Networking.MachineNetwork = GetMachineNetworksForUserManagedNetworking(log, cluster) - if cluster.NetworkType != nil { - cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType) - } if common.IsSingleNodeCluster(cluster) { From 270fcd6524716d486086d7ff976d4243fa422aa1 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Fri, 8 Mar 2024 17:04:10 +1300 Subject: [PATCH 05/15] Use fmt.Errorf() instead of deprecated errors package --- internal/network/cidr_validations.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/internal/network/cidr_validations.go b/internal/network/cidr_validations.go index 3618023ccb..5d1225adae 100644 --- a/internal/network/cidr_validations.go +++ b/internal/network/cidr_validations.go @@ -4,7 +4,6 @@ import ( "net" "github.com/openshift/assisted-service/models" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -21,7 +20,7 @@ const MinSNOMachineMaskDelta = 1 func parseCIDR(cidr string) (ip net.IP, ipnet *net.IPNet, err error) { ip, ipnet, err = net.ParseCIDR(cidr) if err != nil { - err = errors.Wrapf(err, "Failed to parse CIDR '%s'", cidr) + err = fmt.Errorf("Failed to parse CIDR '%s': %w", cidr, err) } return } @@ -50,7 +49,7 @@ func VerifyNetworksNotOverlap(aCidrStr, 
bCidrStr string) error { } if overlap { - return errors.Errorf("CIDRS %s and %s overlap", aCidrStr, bCidrStr) + return fmt.Errorf("CIDRS %s and %s overlap", aCidrStr, bCidrStr) } return nil } @@ -63,13 +62,13 @@ func verifySubnetCIDR(cidrStr string, minSubnetMaskSize int) error { ones, bits := cidr.Mask.Size() // We would like to allow enough addresses. Therefore, ones must be not greater than (bits-minSubnetMaskSize) if ones < 1 || ones > bits-minSubnetMaskSize { - return errors.Errorf("Address mask size must be between 1 to %d and must include at least %d addresses", bits-minSubnetMaskSize, 1< bits-MinMaskDelta { - return errors.Errorf("Host prefix, now %d, must be less than or equal to %d to allow at least 128 addresses", hostNetworkPrefix, bits-MinMaskDelta) + return fmt.Errorf("Host prefix, now %d, must be less than or equal to %d to allow at least 128 addresses", hostNetworkPrefix, bits-MinMaskDelta) } var requestedNumHosts int if numberOfHosts == 1 { @@ -119,7 +118,7 @@ func VerifyClusterCidrSize(hostNetworkPrefix int, clusterNetworkCIDR string, num // 63 to avoid overflow possibleNumHosts := uint64(1) << min(63, max(hostNetworkPrefix-clusterNetworkPrefix, 0)) if uint64(requestedNumHosts) > possibleNumHosts { - return errors.Errorf("Cluster network CIDR prefix %d does not contain enough addresses for %d hosts each one with %d prefix (%d addresses)", + return fmt.Errorf("Cluster network CIDR prefix %d does not contain enough addresses for %d hosts each one with %d prefix (%d addresses)", clusterNetworkPrefix, numberOfHosts, hostNetworkPrefix, uint64(1)< Date: Thu, 29 Feb 2024 16:45:41 +1300 Subject: [PATCH 06/15] MGMT-7587: Check all networks for CIDR overlap None of the subnets specified in any of the machineNetworks, clusterNetworks, or serviceNetworks should overlap. Validate all of these combinations, as openshift-installer does, instead of making assumptions about indices being aligned to address families. 
--- internal/bminventory/inventory.go | 21 +---------- internal/cluster/transition_test.go | 6 +-- internal/cluster/validator.go | 30 +-------------- internal/network/cidr_validations.go | 56 +++++++++++++++++++++------- 4 files changed, 49 insertions(+), 64 deletions(-) diff --git a/internal/bminventory/inventory.go b/internal/bminventory/inventory.go index ec0a836474..3e410a345d 100644 --- a/internal/bminventory/inventory.go +++ b/internal/bminventory/inventory.go @@ -2548,25 +2548,8 @@ func (b *bareMetalInventory) updateNetworks(db *gorm.DB, params installer.V2Upda if params.ClusterUpdateParams.ClusterNetworks != nil || params.ClusterUpdateParams.ServiceNetworks != nil || params.ClusterUpdateParams.MachineNetworks != nil { - // TODO MGMT-7587: Support any number of subnets - // Assumes that the number of cluster networks equal to the number of service networks - for index := range cluster.ClusterNetworks { - machineNetworkCidr := "" - if len(cluster.MachineNetworks) > index { - machineNetworkCidr = string(cluster.MachineNetworks[index].Cidr) - } - - serviceNetworkCidr := "" - if len(cluster.ServiceNetworks) > index { - serviceNetworkCidr = string(cluster.ServiceNetworks[index].Cidr) - } - - if err = network.VerifyClusterCIDRsNotOverlap(machineNetworkCidr, - string(cluster.ClusterNetworks[index].Cidr), - serviceNetworkCidr, - userManagedNetworking); err != nil { - return common.NewApiError(http.StatusBadRequest, err) - } + if err := network.VerifyNoNetworkCidrOverlaps(cluster.ClusterNetworks, cluster.MachineNetworks, cluster.ServiceNetworks); err != nil { + return common.NewApiError(http.StatusBadRequest, err) } } if updated { diff --git a/internal/cluster/transition_test.go b/internal/cluster/transition_test.go index 452b9a7fd2..462d1d1831 100644 --- a/internal/cluster/transition_test.go +++ b/internal/cluster/transition_test.go @@ -2295,7 +2295,7 @@ var _ = Describe("Refresh Cluster - Advanced networking validations", func() { SufficientMastersCount: {status: 
ValidationSuccess, messagePattern: "The cluster has the exact amount of dedicated control plane nodes."}, isClusterCidrDefined: {status: ValidationSuccess, messagePattern: "Cluster Network CIDR is defined"}, isServiceCidrDefined: {status: ValidationSuccess, messagePattern: "Service Network CIDR is defined"}, - noCidrOverlapping: {status: ValidationFailure, messagePattern: "MachineNetworkCIDR and ClusterNetworkCidr: CIDRS .* and .* overlap"}, + noCidrOverlapping: {status: ValidationFailure, messagePattern: "CIDRS .* and .* overlap \\(machine network and cluster network\\)"}, networkPrefixValid: {status: ValidationFailure, messagePattern: "Host prefix, now 0, must be a positive integer"}, isNetworkTypeValid: {status: ValidationSuccess, messagePattern: "The cluster has a valid network type"}, }), @@ -2334,7 +2334,7 @@ var _ = Describe("Refresh Cluster - Advanced networking validations", func() { SufficientMastersCount: {status: ValidationSuccess, messagePattern: "The cluster has the exact amount of dedicated control plane nodes."}, isClusterCidrDefined: {status: ValidationSuccess, messagePattern: "Cluster Network CIDR is defined"}, isServiceCidrDefined: {status: ValidationSuccess, messagePattern: "Service Network CIDR is defined"}, - noCidrOverlapping: {status: ValidationFailure, messagePattern: "MachineNetworkCIDR and ServiceNetworkCIDR: CIDRS .* and .* overlap"}, + noCidrOverlapping: {status: ValidationFailure, messagePattern: "CIDRS .* and .* overlap \\(machine network and service network\\)"}, networkPrefixValid: {status: ValidationFailure, messagePattern: "Host prefix, now 0, must be a positive integer"}, isNetworkTypeValid: {status: ValidationSuccess, messagePattern: "The cluster has a valid network type"}, }), @@ -2843,7 +2843,7 @@ var _ = Describe("Refresh Cluster - Advanced networking validations", func() { SufficientMastersCount: {status: ValidationSuccess, messagePattern: "The cluster has the exact amount of dedicated control plane nodes."}, 
isClusterCidrDefined: {status: ValidationSuccess, messagePattern: "Cluster Network CIDR is defined"}, isServiceCidrDefined: {status: ValidationSuccess, messagePattern: "Service Network CIDR is defined"}, - noCidrOverlapping: {status: ValidationFailure, messagePattern: "MachineNetworkCIDR and ServiceNetworkCIDR: CIDRS .* and .* overlap"}, + noCidrOverlapping: {status: ValidationFailure, messagePattern: "CIDRS .* and .* overlap \\(machine network and service network\\)"}, networkPrefixValid: {status: ValidationFailure, messagePattern: "Host prefix, now 0, must be a positive integer"}, isNetworkTypeValid: {status: ValidationSuccess, messagePattern: "The cluster has a valid network type"}, }), diff --git a/internal/cluster/validator.go b/internal/cluster/validator.go index 5b55d1ba77..6404785223 100644 --- a/internal/cluster/validator.go +++ b/internal/cluster/validator.go @@ -412,27 +412,6 @@ func (v *clusterValidator) isDNSDomainDefined(c *clusterPreprocessContext) (Vali return ValidationFailure, "The base domain is undefined and must be provided." } -func checkCidrsOverlapping(cluster *common.Cluster) error { - //Currently, the networks arrays can hold up to 2 subnets, one for each family - //in the same order. 
If machine networks is defined we assume it follows the - //same conversion - var machineNetworkCidr, clusterNetworkCidr, serviceNetworkCidr string - for index := range cluster.ClusterNetworks { - if index < len(cluster.MachineNetworks) { - machineNetworkCidr = string(cluster.MachineNetworks[index].Cidr) - } - clusterNetworkCidr = string(cluster.ClusterNetworks[index].Cidr) - serviceNetworkCidr = string(cluster.ServiceNetworks[index].Cidr) - - if err := network.VerifyClusterCIDRsNotOverlap(machineNetworkCidr, - clusterNetworkCidr, serviceNetworkCidr, - network.IsMachineNetworkRequired(cluster)); err != nil { - return err - } - } - return nil -} - func (v *clusterValidator) noCidrsOverlapping(c *clusterPreprocessContext) (ValidationStatus, string) { clusterCidrDefined, _ := v.isClusterCidrDefined(c) serviceCidrDefined, _ := v.isServiceCidrDefined(c) @@ -444,13 +423,8 @@ func (v *clusterValidator) noCidrsOverlapping(c *clusterPreprocessContext) (Vali } return ValidationPending, "At least one of the CIDRs (Machine Network, Cluster Network, Service Network) is undefined." } - if len(c.cluster.ClusterNetworks) != len(c.cluster.ServiceNetworks) { - // TODO MGMT-7587: Support any number of subnets - // Assumes that the number of cluster networks equal to the number of service networks - return ValidationError, "A mismatch between the number of Cluster and Service networks" - } - if err := checkCidrsOverlapping(c.cluster); err != nil { - return ValidationFailure, fmt.Sprintf("CIDRS Overlapping: %s.", err.Error()) + if err := network.VerifyNoNetworkCidrOverlaps(c.cluster.ClusterNetworks, c.cluster.MachineNetworks, c.cluster.ServiceNetworks); err != nil { + return ValidationFailure, err.Error() } return ValidationSuccess, "No CIDRS are overlapping." 
} diff --git a/internal/network/cidr_validations.go b/internal/network/cidr_validations.go index 5d1225adae..bd49f3b537 100644 --- a/internal/network/cidr_validations.go +++ b/internal/network/cidr_validations.go @@ -1,6 +1,8 @@ package network import ( + "errors" + "fmt" "net" "github.com/openshift/assisted-service/models" @@ -38,12 +40,12 @@ func NetworksOverlap(aCidrStr, bCidrStr string) (bool, error) { return acidr.Contains(bcidr.IP) || bcidr.Contains(acidr.IP), nil } -func VerifyNetworksNotOverlap(aCidrStr, bCidrStr string) error { +func VerifyNetworksNotOverlap(aCidrStr, bCidrStr models.Subnet) error { if aCidrStr == "" || bCidrStr == "" { return nil } - overlap, err := NetworksOverlap(aCidrStr, bCidrStr) + overlap, err := NetworksOverlap(string(aCidrStr), string(bCidrStr)) if err != nil { return err } @@ -124,22 +126,48 @@ func VerifyClusterCidrSize(hostNetworkPrefix int, clusterNetworkCIDR string, num return nil } -func VerifyClusterCIDRsNotOverlap(machineNetworkCidr, clusterNetworkCidr, serviceNetworkCidr string, machineNetworkRequired bool) error { - if machineNetworkRequired { - err := VerifyNetworksNotOverlap(machineNetworkCidr, serviceNetworkCidr) - if err != nil { - return errors.Wrap(err, "MachineNetworkCIDR and ServiceNetworkCIDR") +func VerifyNoNetworkCidrOverlaps( + clusterNetworks []*models.ClusterNetwork, + machineNetworks []*models.MachineNetwork, + serviceNetworks []*models.ServiceNetwork) error { + errs := []error{} + for imn, mn := range machineNetworks { + for _, cn := range clusterNetworks { + if err := VerifyNetworksNotOverlap(mn.Cidr, cn.Cidr); err != nil { + errs = append(errs, fmt.Errorf("%w (machine network and cluster network)", err)) + } } - err = VerifyNetworksNotOverlap(machineNetworkCidr, clusterNetworkCidr) - if err != nil { - return errors.Wrap(err, "MachineNetworkCIDR and ClusterNetworkCidr") + for _, sn := range serviceNetworks { + if err := VerifyNetworksNotOverlap(mn.Cidr, sn.Cidr); err != nil { + errs = append(errs, 
fmt.Errorf("%w (machine network and service network)", err)) + } + } + for _, omn := range machineNetworks[:imn] { + if err := VerifyNetworksNotOverlap(mn.Cidr, omn.Cidr); err != nil { + errs = append(errs, fmt.Errorf("invalid machine networks: %w", err)) + } } } - err := VerifyNetworksNotOverlap(serviceNetworkCidr, clusterNetworkCidr) - if err != nil { - return errors.Wrap(err, "ServiceNetworkCidr and ClusterNetworkCidr") + for icn, cn := range clusterNetworks { + for _, sn := range serviceNetworks { + if err := VerifyNetworksNotOverlap(sn.Cidr, cn.Cidr); err != nil { + errs = append(errs, fmt.Errorf("%w (service network and cluster network)", err)) + } + } + for _, ocn := range clusterNetworks[:icn] { + if err := VerifyNetworksNotOverlap(cn.Cidr, ocn.Cidr); err != nil { + errs = append(errs, fmt.Errorf("invalid cluster networks: %w", err)) + } + } } - return nil + for isn, sn := range serviceNetworks { + for _, osn := range serviceNetworks[:isn] { + if err := VerifyNetworksNotOverlap(sn.Cidr, osn.Cidr); err != nil { + errs = append(errs, fmt.Errorf("invalid service networks: %w", err)) + } + } + } + return errors.Join(errs...) 
} func VerifyNetworkHostPrefix(prefix int64) error { From ed86360e704a10f68c867645966d53bc47d629d7 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Tue, 20 Aug 2024 21:44:50 +1200 Subject: [PATCH 07/15] Define constants for VIP types --- internal/cluster/transition_test.go | 8 ++++---- internal/cluster/validator.go | 17 ++++++++++------- internal/cluster/validator_test.go | 12 ++++++------ internal/network/machine_network_cidr.go | 15 +++++++++++---- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/internal/cluster/transition_test.go b/internal/cluster/transition_test.go index 462d1d1831..c1a428c437 100644 --- a/internal/cluster/transition_test.go +++ b/internal/cluster/transition_test.go @@ -3455,9 +3455,9 @@ var _ = Describe("Refresh Cluster - With DHCP", func() { validationsChecker: makeJsonChecker(map[ValidationID]validationCheckResult{ IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine Network CIDR is defined"}, AreApiVipsDefined: {status: ValidationSuccess, messagePattern: "API virtual IPs are defined"}, - AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api vips <10.10.10.12> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api-vip <10.10.10.12> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, AreIngressVipsDefined: {status: ValidationSuccess, messagePattern: "Ingress virtual IPs are defined"}, - AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress vips <10.10.10.13> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress-vip <10.10.10.13> does not belong to machine-network-cidr <%s>", 
string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, AllHostsAreReadyToInstall: {status: ValidationSuccess, messagePattern: "All hosts in the cluster are ready to install."}, IsDNSDomainDefined: {status: ValidationSuccess, messagePattern: "The base domain is defined"}, IsPullSecretSet: {status: ValidationSuccess, messagePattern: "The pull secret is set."}, @@ -3486,9 +3486,9 @@ var _ = Describe("Refresh Cluster - With DHCP", func() { IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine Network CIDR is defined"}, IsMachineCidrEqualsToCalculatedCidr: {status: ValidationSuccess, messagePattern: "Cluster Machine CIDR is equivalent to the calculated CIDR"}, AreApiVipsDefined: {status: ValidationSuccess, messagePattern: "API virtual IPs are defined"}, - AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api vips <%s> is already in use in cidr %s", common.TestIPv4Networking.APIVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api-vip <%s> is already in use in cidr %s", common.TestIPv4Networking.APIVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, AreIngressVipsDefined: {status: ValidationSuccess, messagePattern: "Ingress virtual IPs are defined"}, - AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress vips <%s> is already in use in cidr %s", common.TestIPv4Networking.IngressVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress-vip <%s> is already in use in cidr %s", common.TestIPv4Networking.IngressVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, AllHostsAreReadyToInstall: {status: ValidationSuccess, messagePattern: "All hosts in the cluster are ready to install."}, IsDNSDomainDefined: {status: ValidationSuccess, messagePattern: "The base 
domain is defined"}, IsPullSecretSet: {status: ValidationSuccess, messagePattern: "The pull secret is set."}, diff --git a/internal/cluster/validator.go b/internal/cluster/validator.go index 6404785223..3529b33e04 100644 --- a/internal/cluster/validator.go +++ b/internal/cluster/validator.go @@ -154,15 +154,17 @@ type VipsWrapper interface { IP(index int) string Verification(index int) *models.VipVerification GetVips() []string + Type() network.VipType } type ApiVipsWrapper struct { c *clusterPreprocessContext } -func (a *ApiVipsWrapper) Name() string { return "API" } -func (a *ApiVipsWrapper) Len() int { return len(a.c.cluster.APIVips) } -func (a *ApiVipsWrapper) IP(index int) string { return string(a.c.cluster.APIVips[index].IP) } +func (a *ApiVipsWrapper) Name() string { return "API" } +func (a *ApiVipsWrapper) Type() network.VipType { return network.VipTypeAPI } +func (a *ApiVipsWrapper) Len() int { return len(a.c.cluster.APIVips) } +func (a *ApiVipsWrapper) IP(index int) string { return string(a.c.cluster.APIVips[index].IP) } func (a *ApiVipsWrapper) Verification(index int) *models.VipVerification { return a.c.cluster.APIVips[index].Verification } @@ -174,9 +176,10 @@ type IngressVipsWrapper struct { c *clusterPreprocessContext } -func (i *IngressVipsWrapper) Name() string { return "Ingress" } -func (i *IngressVipsWrapper) Len() int { return len(i.c.cluster.IngressVips) } -func (i *IngressVipsWrapper) IP(index int) string { return string(i.c.cluster.IngressVips[index].IP) } +func (i *IngressVipsWrapper) Name() string { return "Ingress" } +func (i *IngressVipsWrapper) Type() network.VipType { return network.VipTypeIngress } +func (i *IngressVipsWrapper) Len() int { return len(i.c.cluster.IngressVips) } +func (i *IngressVipsWrapper) IP(index int) string { return string(i.c.cluster.IngressVips[index].IP) } func (i *IngressVipsWrapper) Verification(index int) *models.VipVerification { return i.c.cluster.IngressVips[index].Verification } @@ -238,7 +241,7 @@ func (v 
*clusterValidator) areVipsValid(c *clusterPreprocessContext, vipsWrapper failed := false for i := 0; i != vipsWrapper.Len(); i++ { - verification, err := network.VerifyVip(c.cluster.Hosts, network.GetMachineCidrById(c.cluster, i), vipsWrapper.IP(i), name, + verification, err := network.VerifyVip(c.cluster.Hosts, network.GetMachineCidrById(c.cluster, i), vipsWrapper.IP(i), vipsWrapper.Type(), vipsWrapper.Verification(i), v.log) failed = failed || verification != models.VipVerificationSucceeded if err != nil { diff --git a/internal/cluster/validator_test.go b/internal/cluster/validator_test.go index 3ed5c0ee50..ef7ac08dc5 100644 --- a/internal/cluster/validator_test.go +++ b/internal/cluster/validator_test.go @@ -365,7 +365,7 @@ var _ = Describe("areVipsValid", func() { status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1.2.3.[56]> are not verified yet", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1.2.3.[56]> is not verified yet", strings.ToLower(lcontext.name)))) }) It("ipv4 vips verified", func() { preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -379,7 +379,7 @@ var _ = Describe("areVipsValid", func() { status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1001:db8::6[45]> are not verified yet", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1001:db8::6[45]> is not verified yet", strings.ToLower(lcontext.name)))) }) It("ipv4 vips verified", func() { preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -393,7 +393,7 @@ var _ = Describe("areVipsValid", func() { status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s 
vips <1001:db8::6[45]> are not verified yet", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1001:db8::6[45]> is not verified yet", strings.ToLower(lcontext.name)))) }) It("all successful", func() { preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -422,7 +422,7 @@ var _ = Describe("areVipsValid", func() { preprocessContext.cluster.IngressVips[0].Verification = common.VipVerificationPtr(models.VipVerificationFailed) status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1.2.3.[56]> is already in use in cidr 1.2.3.0/24", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1.2.3.[56]> is already in use in cidr 1.2.3.0/24", strings.ToLower(lcontext.name)))) }) }) } @@ -515,7 +515,7 @@ var _ = Describe("Validator tests", func() { }} status, message := validator.areApiVipsValid(preprocessContext) Expect(status).To(Equal(ValidationFailure)) - Expect(message).To(Equal("api vips <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) + Expect(message).To(Equal("api-vip <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) // Now try with an API VIP that is not a broadcast address, this should pass validation. preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -542,7 +542,7 @@ var _ = Describe("Validator tests", func() { }} status, message := validator.areIngressVipsValid(preprocessContext) Expect(status).To(Equal(ValidationFailure)) - Expect(message).To(Equal("ingress vips <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) + Expect(message).To(Equal("ingress-vip <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) // Now try with an Ingress VIP that is not a broadcast address, this should pass validation. 
preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index dfc88b0cbb..723a542958 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -17,6 +17,13 @@ import ( "github.com/sirupsen/logrus" ) +type VipType string + +const ( + VipTypeAPI VipType = "api-vip" + VipTypeIngress VipType = "ingress-vip" +) + func getVIPInterfaceNetwork(vip net.IP, addresses []string) *net.IPNet { for _, addr := range addresses { _, ipnet, err := net.ParseCIDR(addr) @@ -116,7 +123,7 @@ func VerifyVipFree(hosts []*models.Host, vip string, machineNetworkCidr string, return IpInFreeList(hosts, vip, machineNetworkCidr, log) } -func VerifyVip(hosts []*models.Host, machineNetworkCidr string, vip string, vipName string, verification *models.VipVerification, log logrus.FieldLogger) (models.VipVerification, error) { +func VerifyVip(hosts []*models.Host, machineNetworkCidr string, vip string, vipName VipType, verification *models.VipVerification, log logrus.FieldLogger) (models.VipVerification, error) { if machineNetworkCidr == "" { return models.VipVerificationUnverified, errors.Errorf("%s <%s> cannot be set if Machine Network CIDR is empty", vipName, vip) } @@ -139,7 +146,7 @@ func VerifyVip(hosts []*models.Host, machineNetworkCidr string, vip string, vipN msg = fmt.Sprintf("%s. The machine network range is too small for the cluster. 
Please redefine the network.", msg) } case models.VipVerificationUnverified: - msg = fmt.Sprintf("%s <%s> are not verified yet.", vipName, vip) + msg = fmt.Sprintf("%s <%s> is not verified yet.", vipName, vip) } return ret, errors.New(msg) } @@ -195,10 +202,10 @@ func ValidateNoVIPAddressesDuplicates(apiVips []*models.APIVip, ingressVips []*m // The assumption is that VIPs are eventually verified by cluster validation // (i.e api-vips-valid, ingress-vips-valid) func VerifyVips(hosts []*models.Host, machineNetworkCidr string, apiVip string, ingressVip string, log logrus.FieldLogger) error { - verification, err := VerifyVip(hosts, machineNetworkCidr, apiVip, "api-vip", nil, log) + verification, err := VerifyVip(hosts, machineNetworkCidr, apiVip, VipTypeAPI, nil, log) // Error is ignored if the verification didn't fail if verification != models.VipVerificationFailed { - verification, err = VerifyVip(hosts, machineNetworkCidr, ingressVip, "ingress-vip", nil, log) + verification, err = VerifyVip(hosts, machineNetworkCidr, ingressVip, VipTypeIngress, nil, log) } if verification != models.VipVerificationFailed { return ValidateNoVIPAddressesDuplicates([]*models.APIVip{{IP: models.IP(apiVip)}}, []*models.IngressVip{{IP: models.IP(ingressVip)}}) From 44bb20e04dcaa43e6c815d0c0d1226a8360ffcd1 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Thu, 29 Feb 2024 17:03:26 +1300 Subject: [PATCH 08/15] Validate VIPs against all MachineNetworks Don't make assumptions about a 1:1 mapping between MachineNetworks and VIPs. Check only that the VIP is a member of any MachineNetwork. 
--- internal/bminventory/inventory.go | 26 ++++-------- internal/bminventory/inventory_test.go | 42 ++----------------- internal/cluster/transition_test.go | 4 +- internal/cluster/validations/validations.go | 2 +- internal/cluster/validator.go | 2 +- internal/network/machine_network_cidr.go | 42 ++++++++++--------- internal/network/machine_network_cidr_test.go | 15 ++++--- 7 files changed, 49 insertions(+), 84 deletions(-) diff --git a/internal/bminventory/inventory.go b/internal/bminventory/inventory.go index 3e410a345d..cdbf7f963a 100644 --- a/internal/bminventory/inventory.go +++ b/internal/bminventory/inventory.go @@ -2235,48 +2235,39 @@ func (b *bareMetalInventory) updateNonDhcpNetworkParams(cluster *common.Cluster, return common.NewApiError(http.StatusBadRequest, err) } if interactivity == Interactive && (params.ClusterUpdateParams.APIVips != nil || params.ClusterUpdateParams.IngressVips != nil) { - var primaryMachineNetworkCidr string - var secondaryMachineNetworkCidr string - matchRequired := network.GetApiVipById(&targetConfiguration, 0) != "" || network.GetIngressVipById(&targetConfiguration, 0) != "" // We want to calculate Machine Network based on the API/Ingress VIPs only in case of the // single-stack cluster. Auto calculation is not supported for dual-stack in which we // require that user explicitly provides all the Machine Networks. 
if reqDualStack { - if params.ClusterUpdateParams.MachineNetworks != nil { - primaryMachineNetworkCidr = string(params.ClusterUpdateParams.MachineNetworks[0].Cidr) - } else { - primaryMachineNetworkCidr = network.GetPrimaryMachineCidrForUserManagedNetwork(cluster, log) - } - err = network.VerifyMachineNetworksDualStack(targetConfiguration.MachineNetworks, reqDualStack) if err != nil { log.WithError(err).Warnf("Verify dual-stack machine networks") return common.NewApiError(http.StatusBadRequest, err) } - secondaryMachineNetworkCidr, err = network.GetSecondaryMachineCidr(&targetConfiguration) - if err != nil { - return common.NewApiError(http.StatusBadRequest, err) - } - if err = network.VerifyVips(cluster.Hosts, primaryMachineNetworkCidr, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { + if err = network.VerifyVips(cluster.Hosts, targetConfiguration.MachineNetworks, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { log.WithError(err).Warnf("Verify VIPs") return common.NewApiError(http.StatusBadRequest, err) } if len(targetConfiguration.IngressVips) == 2 && len(targetConfiguration.APIVips) == 2 { // in case there's a second set of VIPs - if err = network.VerifyVips(cluster.Hosts, secondaryMachineNetworkCidr, network.GetApiVipById(&targetConfiguration, 1), network.GetIngressVipById(&targetConfiguration, 1), log); err != nil { + if err = network.VerifyVips(cluster.Hosts, targetConfiguration.MachineNetworks, network.GetApiVipById(&targetConfiguration, 1), network.GetIngressVipById(&targetConfiguration, 1), log); err != nil { log.WithError(err).Warnf("Verify VIPs") return common.NewApiError(http.StatusBadRequest, err) } } } else { - primaryMachineNetworkCidr, err = network.CalculateMachineNetworkCIDR(network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), cluster.Hosts, matchRequired) + // 
TODO: OCPBUGS-30730 this function needs to be changed to allow + // returning multiple MachineNetworks, so the API VIP and + // Ingress VIP can be in different L2 networks. + primaryMachineNetworkCidr, err := network.CalculateMachineNetworkCIDR(network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), cluster.Hosts, matchRequired) if err != nil { return common.NewApiError(http.StatusBadRequest, errors.Wrap(err, "Calculate machine network CIDR")) } + calculatedMachineNetworks := []*models.MachineNetwork{} if primaryMachineNetworkCidr != "" { // We set the machine networks in the ClusterUpdateParams, so they will be viewed as part of the request // to update the cluster @@ -2285,8 +2276,9 @@ func (b *bareMetalInventory) updateNonDhcpNetworkParams(cluster *common.Cluster, // returned with an error. Therefore, params.ClusterUpdateParams.MachineNetworks is empty here before // the assignment below. params.ClusterUpdateParams.MachineNetworks = []*models.MachineNetwork{{Cidr: models.Subnet(primaryMachineNetworkCidr)}} + calculatedMachineNetworks = params.ClusterUpdateParams.MachineNetworks } - if err = network.VerifyVips(cluster.Hosts, primaryMachineNetworkCidr, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { + if err = network.VerifyVips(cluster.Hosts, calculatedMachineNetworks, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { log.WithError(err).Warnf("Verify VIPs") return common.NewApiError(http.StatusBadRequest, err) } diff --git a/internal/bminventory/inventory_test.go b/internal/bminventory/inventory_test.go index 5e555f6780..b14e14fd5a 100644 --- a/internal/bminventory/inventory_test.go +++ b/internal/bminventory/inventory_test.go @@ -4208,7 +4208,7 @@ var _ = Describe("cluster", func() { }, }) verifyApiErrorString(reply, http.StatusBadRequest, - "ingress-vip <1.2.3.20> does not belong 
to machine-network-cidr <10.11.0.0/16>") + "ingress-vip <1.2.3.20> does not belong to any Machine Network") }) It("Same api and ingress", func() { apiVip := "10.11.12.15" @@ -4389,7 +4389,7 @@ var _ = Describe("cluster", func() { MachineNetworks: []*models.MachineNetwork{{Cidr: "10.12.0.0/16"}, {Cidr: "fd2e:6f44:5dd8:c956::/120"}}, }, }) - verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to machine-network-cidr <10.12.0.0/16>") + verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to any Machine Network") }) }) @@ -14874,7 +14874,7 @@ location = "%s" VipDhcpAllocation: swag.Bool(false), }, }) - verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to machine-network-cidr <1.2.3.0/24>") + verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to any Machine Network") }) It("Ingress VIP not in Machine Network", func() { apiVip := "1.2.3.5" @@ -14890,7 +14890,7 @@ location = "%s" VipDhcpAllocation: swag.Bool(false), }, }) - verifyApiErrorString(reply, http.StatusBadRequest, "ingress-vip <10.11.12.16> does not belong to machine-network-cidr <1.2.3.0/24>") + verifyApiErrorString(reply, http.StatusBadRequest, "ingress-vip <10.11.12.16> does not belong to any Machine Network") }) It("API VIP and Ingress VIP with empty Machine Networks", func() { apiVip := "10.11.12.15" @@ -14907,40 +14907,6 @@ location = "%s" }) verifyApiErrorString(reply, http.StatusBadRequest, "Dual-stack cluster cannot be created with empty Machine Networks") }) - - It("API VIP from IPv6 Machine Network", func() { - apiVip := "1001:db8::64" - ingressVip := "1.2.3.6" - - reply := bm.V2RegisterCluster(ctx, installer.V2RegisterClusterParams{ - NewClusterParams: &models.ClusterCreateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - ClusterNetworks: 
common.TestDualStackNetworking.ClusterNetworks, - MachineNetworks: common.TestDualStackNetworking.MachineNetworks, - ServiceNetworks: common.TestDualStackNetworking.ServiceNetworks, - VipDhcpAllocation: swag.Bool(false), - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <1001:db8::64> does not belong to machine-network-cidr <1.2.3.0/24>") - }) - - It("Ingress VIP from IPv6 Machine Network", func() { - apiVip := "1.2.3.5" - ingressVip := "1001:db8::65" - - reply := bm.V2RegisterCluster(ctx, installer.V2RegisterClusterParams{ - NewClusterParams: &models.ClusterCreateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - ClusterNetworks: common.TestDualStackNetworking.ClusterNetworks, - MachineNetworks: common.TestDualStackNetworking.MachineNetworks, - ServiceNetworks: common.TestDualStackNetworking.ServiceNetworks, - VipDhcpAllocation: swag.Bool(false), - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "ingress-vip <1001:db8::65> does not belong to machine-network-cidr <1.2.3.0/24>") - }) }) Context("V2 Update cluster", func() { diff --git a/internal/cluster/transition_test.go b/internal/cluster/transition_test.go index c1a428c437..ecd62ebebb 100644 --- a/internal/cluster/transition_test.go +++ b/internal/cluster/transition_test.go @@ -3455,9 +3455,9 @@ var _ = Describe("Refresh Cluster - With DHCP", func() { validationsChecker: makeJsonChecker(map[ValidationID]validationCheckResult{ IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine Network CIDR is defined"}, AreApiVipsDefined: {status: ValidationSuccess, messagePattern: "API virtual IPs are defined"}, - AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api-vip <10.10.10.12> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreApiVipsValid: {status: ValidationFailure, messagePattern: "api-vip 
<10.10.10.12> does not belong to any Machine Network"}, AreIngressVipsDefined: {status: ValidationSuccess, messagePattern: "Ingress virtual IPs are defined"}, - AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress-vip <10.10.10.13> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreIngressVipsValid: {status: ValidationFailure, messagePattern: "ingress-vip <10.10.10.13> does not belong to any Machine Network"}, AllHostsAreReadyToInstall: {status: ValidationSuccess, messagePattern: "All hosts in the cluster are ready to install."}, IsDNSDomainDefined: {status: ValidationSuccess, messagePattern: "The base domain is defined"}, IsPullSecretSet: {status: ValidationSuccess, messagePattern: "The pull secret is set."}, diff --git a/internal/cluster/validations/validations.go b/internal/cluster/validations/validations.go index 7481481e6f..f3bfb6f97a 100644 --- a/internal/cluster/validations/validations.go +++ b/internal/cluster/validations/validations.go @@ -501,7 +501,7 @@ func validateVIPAddresses(ipV6Supported bool, targetConfiguration common.Cluster } else { if len(targetConfiguration.MachineNetworks) > 0 { for i := range targetConfiguration.APIVips { // len of APIVips and IngressVips should be the same. asserted above. 
- err = network.VerifyVips(nil, string(targetConfiguration.MachineNetworks[i].Cidr), + err = network.VerifyVips(nil, targetConfiguration.MachineNetworks, string(targetConfiguration.APIVips[i].IP), string(targetConfiguration.IngressVips[i].IP), nil) if err != nil { multiErr = multierror.Append(multiErr, err) diff --git a/internal/cluster/validator.go b/internal/cluster/validator.go index 3529b33e04..a8288aa5bb 100644 --- a/internal/cluster/validator.go +++ b/internal/cluster/validator.go @@ -241,7 +241,7 @@ func (v *clusterValidator) areVipsValid(c *clusterPreprocessContext, vipsWrapper failed := false for i := 0; i != vipsWrapper.Len(); i++ { - verification, err := network.VerifyVip(c.cluster.Hosts, network.GetMachineCidrById(c.cluster, i), vipsWrapper.IP(i), vipsWrapper.Type(), + verification, err := network.VerifyVip(c.cluster.Hosts, c.cluster.MachineNetworks, vipsWrapper.IP(i), vipsWrapper.Type(), vipsWrapper.Verification(i), v.log) failed = failed || verification != models.VipVerificationSucceeded if err != nil { diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index 723a542958..086956ddd3 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -123,12 +123,28 @@ func VerifyVipFree(hosts []*models.Host, vip string, machineNetworkCidr string, return IpInFreeList(hosts, vip, machineNetworkCidr, log) } -func VerifyVip(hosts []*models.Host, machineNetworkCidr string, vip string, vipName VipType, verification *models.VipVerification, log logrus.FieldLogger) (models.VipVerification, error) { - if machineNetworkCidr == "" { +func findMachineNetworkForVip(vip string, machineNetworks []*models.MachineNetwork) (bool, string) { + machineNetworkDefined := false + for _, machineNetwork := range machineNetworks { + if machineNetwork == nil || machineNetwork.Cidr == "" { + continue + } + machineNetworkDefined = true + if cidr := string(machineNetwork.Cidr); ipInCidr(vip, cidr) { + 
return true, cidr + } + } + return machineNetworkDefined, "" +} + +func VerifyVip(hosts []*models.Host, machineNetworks []*models.MachineNetwork, vip string, vipName VipType, verification *models.VipVerification, log logrus.FieldLogger) (models.VipVerification, error) { + machineNetworkDefined, machineNetworkCidr := findMachineNetworkForVip(vip, machineNetworks) + + if !machineNetworkDefined { return models.VipVerificationUnverified, errors.Errorf("%s <%s> cannot be set if Machine Network CIDR is empty", vipName, vip) } - if !ipInCidr(vip, machineNetworkCidr) { - return models.VipVerificationFailed, errors.Errorf("%s <%s> does not belong to machine-network-cidr <%s>", vipName, vip, machineNetworkCidr) + if machineNetworkCidr == "" { + return models.VipVerificationFailed, errors.Errorf("%s <%s> does not belong to any Machine Network", vipName, vip) } if ipIsBroadcast(vip, machineNetworkCidr) { return models.VipVerificationFailed, errors.Errorf("%s <%s> is the broadcast address of machine-network-cidr <%s>", vipName, vip, machineNetworkCidr) @@ -201,11 +217,11 @@ func ValidateNoVIPAddressesDuplicates(apiVips []*models.APIVip, ingressVips []*m // This function is called from places which assume it is OK for a VIP to be unverified. 
// The assumption is that VIPs are eventually verified by cluster validation // (i.e api-vips-valid, ingress-vips-valid) -func VerifyVips(hosts []*models.Host, machineNetworkCidr string, apiVip string, ingressVip string, log logrus.FieldLogger) error { - verification, err := VerifyVip(hosts, machineNetworkCidr, apiVip, VipTypeAPI, nil, log) +func VerifyVips(hosts []*models.Host, machineNetworks []*models.MachineNetwork, apiVip string, ingressVip string, log logrus.FieldLogger) error { + verification, err := VerifyVip(hosts, machineNetworks, apiVip, VipTypeAPI, nil, log) // Error is ignored if the verification didn't fail if verification != models.VipVerificationFailed { - verification, err = VerifyVip(hosts, machineNetworkCidr, ingressVip, VipTypeIngress, nil, log) + verification, err = VerifyVip(hosts, machineNetworks, ingressVip, VipTypeIngress, nil, log) } if verification != models.VipVerificationFailed { return ValidateNoVIPAddressesDuplicates([]*models.APIVip{{IP: models.IP(apiVip)}}, []*models.IngressVip{{IP: models.IP(ingressVip)}}) @@ -347,18 +363,6 @@ func GetPrimaryMachineCidrForUserManagedNetwork(cluster *common.Cluster, log log return "" } -func GetSecondaryMachineCidr(cluster *common.Cluster) (string, error) { - var secondaryMachineCidr string - - if IsMachineCidrAvailable(cluster) { - secondaryMachineCidr = GetMachineCidrById(cluster, 1) - } - if secondaryMachineCidr != "" { - return secondaryMachineCidr, nil - } - return "", errors.Errorf("unable to get secondary machine network for cluster %s", cluster.ID.String()) -} - // GetMachineNetworksFromBootstrapHost used to collect machine networks from the cluster's bootstrap host. // The function will get at most one IPv4 and one IPv6 network. 
func GetMachineNetworksFromBootstrapHost(cluster *common.Cluster, log logrus.FieldLogger) []*models.MachineNetwork { diff --git a/internal/network/machine_network_cidr_test.go b/internal/network/machine_network_cidr_test.go index e5c12d55a2..a5d32de28f 100644 --- a/internal/network/machine_network_cidr_test.go +++ b/internal/network/machine_network_cidr_test.go @@ -137,6 +137,9 @@ var _ = Describe("inventory", func() { var ( log logrus.FieldLogger primaryMachineCidr = "1.2.4.0/23" + machineNetworks = []*models.MachineNetwork{ + {Cidr: models.Subnet(primaryMachineCidr)}, + } ) BeforeEach(func() { @@ -151,7 +154,7 @@ var _ = Describe("inventory", func() { }, } cluster.IngressVips = []*models.IngressVip{{IP: models.IP(GetApiVipById(cluster, 0))}} - err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) + err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) Expect(err).To(HaveOccurred()) }) It("Different vips", func() { @@ -163,7 +166,7 @@ var _ = Describe("inventory", func() { FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\"]}]", }, } - err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) + err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) Expect(err).ToNot(HaveOccurred()) }) It("Not free", func() { @@ -175,7 +178,7 @@ var _ = Describe("inventory", func() { FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.9\"]}]", }, } - err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) + err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) Expect(err).To(HaveOccurred()) }) It("Empty", func() { @@ -187,7 +190,7 @@ var _ = Describe("inventory", func() { 
FreeAddresses: "", }, } - err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) + err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) Expect(err).ToNot(HaveOccurred()) }) It("Free", func() { @@ -199,7 +202,7 @@ var _ = Describe("inventory", func() { FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\",\"1.2.5.9\"]}]", }, } - err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) + err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) Expect(err).ToNot(HaveOccurred()) }) It("machine cidr is too small", func() { @@ -214,7 +217,7 @@ var _ = Describe("inventory", func() { } cluster.Hosts = []*models.Host{h, h, h, h, h} cluster.APIVips = []*models.APIVip{{IP: "1.2.5.2"}} - err := VerifyVips(cluster.Hosts, "1.2.5.0/29", GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) + err := VerifyVips(cluster.Hosts, []*models.MachineNetwork{{Cidr: "1.2.5.0/29"}}, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(ContainSubstring("The machine network range is too small for the cluster")) }) From 219c4f1a967315a72e29226865c2ca146458c37a Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Tue, 3 Sep 2024 15:45:31 +1200 Subject: [PATCH 09/15] Validate that hosts are in VIP MachineNetworks When doing cluster validations, check that all of the hosts that the VIP can point to (i.e. control plane hosts for the API VIP, workers for the Ingress VIP) are members of the VIP's MachineNetwork. Since at the time of adding the VIPs (and also during cluster validations) we only check that the VIP is a member of _some_ MachineNetwork, we need this additional check to ensure that it is one where the hosts are. 
--- internal/cluster/validator.go | 7 +- internal/network/machine_network_cidr.go | 34 ++++ internal/network/machine_network_cidr_test.go | 189 ++++++++++++++++++ 3 files changed, 229 insertions(+), 1 deletion(-) diff --git a/internal/cluster/validator.go b/internal/cluster/validator.go index a8288aa5bb..6b3be069bc 100644 --- a/internal/cluster/validator.go +++ b/internal/cluster/validator.go @@ -243,7 +243,12 @@ func (v *clusterValidator) areVipsValid(c *clusterPreprocessContext, vipsWrapper for i := 0; i != vipsWrapper.Len(); i++ { verification, err := network.VerifyVip(c.cluster.Hosts, c.cluster.MachineNetworks, vipsWrapper.IP(i), vipsWrapper.Type(), vipsWrapper.Verification(i), v.log) - failed = failed || verification != models.VipVerificationSucceeded + if verification == models.VipVerificationSucceeded { + verification, err = network.ValidateVipInHostNetworks(c.cluster.Hosts, c.cluster.MachineNetworks, vipsWrapper.IP(i), vipsWrapper.Type(), v.log) + failed = failed || verification != models.VipVerificationSucceeded + } else { + failed = true + } if err != nil { multiErr = multierror.Append(multiErr, err) } diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index 086956ddd3..8f59db0070 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -167,6 +167,40 @@ func VerifyVip(hosts []*models.Host, machineNetworks []*models.MachineNetwork, v return ret, errors.New(msg) } +func ValidateVipInHostNetworks(hosts []*models.Host, machineNetworks []*models.MachineNetwork, vip string, vipName VipType, log logrus.FieldLogger) (models.VipVerification, error) { + _, machineNetworkCidr := findMachineNetworkForVip(vip, machineNetworks) + _, machineIpnet, err := net.ParseCIDR(machineNetworkCidr) + if err != nil { + log.WithError(err).Errorf("can't parse machine cidr %s", machineNetworkCidr) + return models.VipVerificationFailed, fmt.Errorf("can't parse machine cidr %s", 
machineNetworkCidr) + } + for _, h := range hosts { + switch common.GetEffectiveRole(h) { + case models.HostRoleWorker: + if vipName != VipTypeIngress { + continue + } + case models.HostRoleMaster, models.HostRoleBootstrap: + // When there are fewer than 2 workers, control plane nodes are + // schedulable and Ingress will run on the control plane nodes + if vipName != VipTypeAPI && len(hosts) > 4 { + continue + } + default: + continue + } + if h.Inventory == "" { + continue + } + if !belongsToNetwork(log, h, machineIpnet) { + return models.VipVerificationFailed, fmt.Errorf( + "%s host %s not in the Machine Network containing the %s <%s>", + h.Role, h.ID, vipName, machineNetworkCidr) + } + } + return models.VipVerificationSucceeded, nil +} + func ValidateNoVIPAddressesDuplicates(apiVips []*models.APIVip, ingressVips []*models.IngressVip) error { var ( err error diff --git a/internal/network/machine_network_cidr_test.go b/internal/network/machine_network_cidr_test.go index a5d32de28f..7294447e55 100644 --- a/internal/network/machine_network_cidr_test.go +++ b/internal/network/machine_network_cidr_test.go @@ -133,6 +133,195 @@ var _ = Describe("inventory", func() { }) }) + Context("ValidateVipInHostNetworks", func() { + var ( + log logrus.FieldLogger + primaryMachineCidr = "1.2.4.0/23" + secondaryMachineCidr = "1.2.8.0/23" + machineNetworks = []*models.MachineNetwork{ + {Cidr: models.Subnet(primaryMachineCidr)}, + {Cidr: models.Subnet(secondaryMachineCidr)}, + } + ) + + createRoleHost := func(role models.HostRole, cidr string) *models.Host { + return &models.Host{ + Role: role, + Inventory: createInventory(createInterface(cidr)), + } + } + + BeforeEach(func() { + log = logrus.New() + }) + It("succeeds when all hosts are in a single machine network", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + 
createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + }) + It("detects when VIPs are in the wrong machine network", func() { + cluster := createCluster("1.2.9.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.9.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + }) + It("succeeds on a compact cluster with a single machine network", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + } + 
cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + }) + It("succeeds when VIPs are on different machine networks", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.8.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.8.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.9.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + }) + It("detects when VIPs are on wrong machine networks", func() { + cluster := createCluster("1.2.9.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.8.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.8.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: 
models.IP("1.2.5.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + }) + It("detects when Ingress VIP not available to a compact cluster", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.9.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + }) + It("detects when API VIP not available to bootstrap master", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.8.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).To(HaveOccurred()) 
+ Expect(result).To(Equal(models.VipVerificationFailed)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + }) + It("detects when API VIP not available to control plane", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.8.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + + result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + }) + It("detects when Ingress VIP not available to worker", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.103/23"), + createRoleHost(models.HostRoleWorker, "1.2.4.111/23"), + createRoleHost(models.HostRoleWorker, "1.2.8.112/23"), + } + cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}} + + result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log) + Expect(err).ToNot(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationSucceeded)) + + 
result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log) + Expect(err).To(HaveOccurred()) + Expect(result).To(Equal(models.VipVerificationFailed)) + }) + }) Context("VerifyVips", func() { var ( log logrus.FieldLogger From a3bccff601ec46af0339ca130b4c2ebd06b5611f Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Tue, 20 Aug 2024 22:10:32 +1200 Subject: [PATCH 10/15] Remove unused function It doesn't appear this was ever used outside of its own unit tests. --- internal/network/machine_network_cidr.go | 17 ----------- internal/network/machine_network_cidr_test.go | 30 ------------------- 2 files changed, 47 deletions(-) diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index 8f59db0070..b973cc1fcb 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -355,23 +355,6 @@ func belongsToNetwork(log logrus.FieldLogger, h *models.Host, machineIpnet *net. 
return false } -func GetPrimaryMachineCIDRHosts(log logrus.FieldLogger, cluster *common.Cluster) ([]*models.Host, error) { - if !IsMachineCidrAvailable(cluster) { - return nil, errors.New("Machine network CIDR was not set in cluster") - } - _, machineIpnet, err := net.ParseCIDR(GetMachineCidrById(cluster, 0)) - if err != nil { - return nil, err - } - ret := make([]*models.Host, 0) - for _, h := range cluster.Hosts { - if belongsToNetwork(log, h, machineIpnet) { - ret = append(ret, h) - } - } - return ret, nil -} - // GetPrimaryMachineCidrForUserManagedNetwork used to get the primary machine cidr in case of none platform and sno func GetPrimaryMachineCidrForUserManagedNetwork(cluster *common.Cluster, log logrus.FieldLogger) string { if IsMachineCidrAvailable(cluster) { diff --git a/internal/network/machine_network_cidr_test.go b/internal/network/machine_network_cidr_test.go index 7294447e55..45d2cddf06 100644 --- a/internal/network/machine_network_cidr_test.go +++ b/internal/network/machine_network_cidr_test.go @@ -103,36 +103,6 @@ var _ = Describe("inventory", func() { Expect(cidr).To(Equal("")) }) }) - Context("GetMachineCIDRHosts", func() { - It("No Machine CIDR", func() { - cluster := createCluster("1.2.5.6", "", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - _, err := GetPrimaryMachineCIDRHosts(logrus.New(), cluster) - Expect(err).To(HaveOccurred()) - }) - It("No matching Machine CIDR", func() { - cluster := createCluster("1.2.5.6", "1.1.0.0/16", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - hosts, err := GetPrimaryMachineCIDRHosts(logrus.New(), cluster) - Expect(err).To(Not(HaveOccurred())) - Expect(hosts).To(BeEmpty()) - }) - It("Some matched", func() { - cluster := createCluster("1.2.5.6", "1.2.4.0/23", - createInventory(createInterface("3.3.3.3/16"), 
createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17")), - createInventory(createInterface("1.2.4.79/23"))) - hosts, err := GetPrimaryMachineCIDRHosts(logrus.New(), cluster) - Expect(err).To(Not(HaveOccurred())) - Expect(hosts).To(Equal([]*models.Host{ - cluster.Hosts[0], - cluster.Hosts[2], - })) - - }) - }) Context("ValidateVipInHostNetworks", func() { var ( log logrus.FieldLogger From 568ffb0af03062677a754f8a3a9fe48590dfdbbb Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 11 Mar 2024 13:33:57 +1300 Subject: [PATCH 11/15] Allow host to be in any MachineNetwork In the belongs-to-machine-cidr validation, allow the host to be a member of any MachineNetwork. In a dual-stack cluster, require it to be a member of both an IPv4 and an IPv6 network. Previously it was assumed that the only reason for multiple MachineNetworks to appear was that a dual stack cluster could contain exactly one IPv4 and one IPv6 MachineNetwork. --- internal/host/transition_test.go | 18 +++---- internal/host/validator.go | 6 +-- internal/network/machine_network_cidr.go | 50 ++++++++----------- internal/network/machine_network_cidr_test.go | 17 ++++--- 4 files changed, 43 insertions(+), 48 deletions(-) diff --git a/internal/host/transition_test.go b/internal/host/transition_test.go index aff1920404..aac5ae1d39 100644 --- a/internal/host/transition_test.go +++ b/internal/host/transition_test.go @@ -76,7 +76,7 @@ func createValidatorCfg() *hardware.ValidatorCfg { EnableUpgradeAgent: true, }, VersionedRequirements: hardware.VersionedRequirementsDecoder{ - "default": { + "default": models.VersionedHostRequirements{ Version: "default", MasterRequirements: &defaultMasterRequirements, WorkerRequirements: &defaultWorkerRequirements, @@ -3204,7 +3204,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleWorker, 
statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Host couldn't synchronize with any NTP server", "Failed to fetch container images needed for installation from image. This may be due to a network hiccup. Retry to install again. If this problem persists, "+ "check your network settings to make sure you’re not blocked.")), @@ -3218,7 +3218,7 @@ var _ = Describe("Refresh Host", func() { HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname worker-1 is unique in cluster"}, - BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. 
Verify that the host belongs to a listed machine network CIDR for each IP stack in use"}, IsPlatformNetworkSettingsValid: {status: ValidationSuccess, messagePattern: "Platform RHEL is allowed"}, CompatibleWithClusterPlatform: {status: ValidationSuccess, messagePattern: "Host is compatible with cluster platform baremetal"}, IsNTPSynced: {status: ValidationFailure, messagePattern: "Host couldn't synchronize with any NTP server"}, @@ -3239,7 +3239,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Require at least 4 CPU cores for master role, found only 2", "Require at least 16.00 GiB RAM for role master, found only 8.00 GiB", "Host couldn't synchronize with any NTP server", @@ -3255,7 +3255,7 @@ var _ = Describe("Refresh Host", func() { HasCPUCoresForRole: {status: ValidationFailure, messagePattern: "Require at least 4 CPU cores for master role, found only 2"}, HasMemoryForRole: {status: ValidationFailure, messagePattern: "Require at least 16.00 GiB RAM for role master, found only 8.00 GiB"}, IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname worker-1 is unique in cluster"}, - BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. 
Verify that the host belongs to a listed machine network CIDR for each IP stack in use"}, IsPlatformNetworkSettingsValid: {status: ValidationSuccess, messagePattern: "Platform RHEL is allowed"}, CompatibleWithClusterPlatform: {status: ValidationSuccess, messagePattern: "Host is compatible with cluster platform baremetal"}, IsNTPSynced: {status: ValidationFailure, messagePattern: "Host couldn't synchronize with any NTP server"}, @@ -3369,7 +3369,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Host couldn't synchronize with any NTP server", "Failed to fetch container images needed for installation from image. This may be due to a network hiccup. Retry to install again. If this problem persists, "+ "check your network settings to make sure you’re not blocked.")), @@ -3402,7 +3402,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Host couldn't synchronize with any NTP server", "Failed to fetch container images needed for installation from image. 
This may be due to a network hiccup. Retry to install again. If this problem persists, "+ "check your network settings to make sure you’re not blocked.")), @@ -4234,7 +4234,7 @@ var _ = Describe("Refresh Host", func() { ntpSources: defaultNTPSources, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks")), + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use")), validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, @@ -4245,7 +4245,7 @@ var _ = Describe("Refresh Host", func() { HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, - BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. 
Verify that the host belongs to a listed machine network CIDR for each IP stack in use"}, IsHostnameValid: {status: ValidationSuccess, messagePattern: "Hostname .* is allowed"}, BelongsToMajorityGroup: {status: ValidationSuccess, messagePattern: "Host has connectivity to the majority of hosts in the cluster"}, IsNTPSynced: {status: ValidationSuccess, messagePattern: "Host NTP is synced"}, diff --git a/internal/host/validator.go b/internal/host/validator.go index f1ccb79894..eea2a0de7a 100644 --- a/internal/host/validator.go +++ b/internal/host/validator.go @@ -605,8 +605,8 @@ func (v *validator) belongsToMachineCidr(c *validationContext) (ValidationStatus if c.inventory == nil || !network.IsMachineCidrAvailable(c.cluster) { return ValidationPending, "Missing inventory or machine network CIDR" } - if !network.IsHostInPrimaryMachineNetCidr(v.log, c.cluster, c.host) { - return ValidationFailure, "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks" + if !network.IsHostInMachineNetCidrs(v.log, c.cluster, c.host) { + return ValidationFailure, "Host does not belong to machine network CIDRs. 
Verify that the host belongs to a listed machine network CIDR for each IP stack in use" } return ValidationSuccess, "Host belongs to all machine network CIDRs" } @@ -1874,7 +1874,7 @@ func (v *validator) noIscsiNicBelongsToMachineCidr(c *validationContext) (Valida return ValidationError, "Cannot find network interface associated to iSCSI host IP address" } - found := network.IsInterfaceInPrimaryMachineNetCidr(v.log, c.cluster, nic) + found := network.IsInterfaceInMachineNetCidr(v.log, c.cluster, nic) if found { return ValidationFailure, "Network interface connected to iSCSI disk cannot belong to machine network CIDRs" } diff --git a/internal/network/machine_network_cidr.go b/internal/network/machine_network_cidr.go index b973cc1fcb..9757ae8b23 100644 --- a/internal/network/machine_network_cidr.go +++ b/internal/network/machine_network_cidr.go @@ -542,62 +542,56 @@ func GetDefaultRouteNetworkByFamily(h *models.Host, networks map[AddressFamily][ } // Parse the Machine Network CIDRs into IPNet -func parseMachineNetworks(machineNetworks []*models.MachineNetwork) ([]*net.IPNet, error) { - var parsedCidr []*net.IPNet +func parseMachineNetworks(machineNetworks []*models.MachineNetwork) ([]*net.IPNet, []*net.IPNet, error) { + var parsedV4Cidr, parsedV6Cidr []*net.IPNet for _, machineNet := range machineNetworks { _, machineIpnet, err := net.ParseCIDR(string(machineNet.Cidr)) if err != nil { - return nil, err + return nil, nil, err + } + if IsIPV4CIDR(string(machineNet.Cidr)) { + parsedV4Cidr = append(parsedV4Cidr, machineIpnet) + } else { + parsedV6Cidr = append(parsedV6Cidr, machineIpnet) } - parsedCidr = append(parsedCidr, machineIpnet) } - return parsedCidr, nil + return parsedV4Cidr, parsedV6Cidr, nil } -// Check if a host belongs to all the networks specified as Machine Networks. 
-func IsHostInPrimaryMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, host *models.Host) bool { - // TODO(mko) This rule should be revised as soon as OCP supports multiple machineNetwork - // entries using the same IP stack. - return forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { - // initialize agg to true the first time this function is called - if index == 0 { - agg = true - } - - return agg && belongsToNetwork(log, host, machineIpnet) +// Check if a host belongs to one of the Machine Networks of each family. +func IsHostInMachineNetCidrs(log logrus.FieldLogger, cluster *common.Cluster, host *models.Host) bool { + inV4, inV6 := forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { + return agg || belongsToNetwork(log, host, machineIpnet) }) + // Require membership in at least one machine network, and in a network of each address family that is actually configured. A family with no configured machine network imposes no requirement, otherwise every single-stack cluster would fail this check (lo.Reduce over an empty slice yields the initial value false). + haveV4 := lo.SomeBy(cluster.MachineNetworks, func(n *models.MachineNetwork) bool { return IsIPV4CIDR(string(n.Cidr)) }) + haveV6 := lo.SomeBy(cluster.MachineNetworks, func(n *models.MachineNetwork) bool { return IsIPv6CIDR(string(n.Cidr)) }) + return (inV4 || inV6) && (!haveV4 || inV4) && (!haveV6 || inV6) } // Check if an interface is part of one of the networks specified as Machine Networks -func IsInterfaceInPrimaryMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, nic *models.Interface) bool { - return forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { - // initialize agg to false the first time this function is called - if index == 0 { - agg = false - } - +func IsInterfaceInMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, nic *models.Interface) bool { + inV4, inV6 := forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { isIPv4 := IsIPV4CIDR(machineIpnet.String()) found, _ := findMatchingIP(machineIpnet, nic, isIPv4) return agg || found }) + return inV4 || inV6 } type machineNetworkCheckFunc func(bool, *net.IPNet, int) bool -// Function calling a given check fonction on each of the networks specified in cluster's Machine Networks. +// Function calling a given check function on each of the networks specified in cluster's Machine Networks.
-func forEachMachineNetwork(log logrus.FieldLogger, cluster *common.Cluster, checkFunc machineNetworkCheckFunc) bool { +func forEachMachineNetwork(log logrus.FieldLogger, cluster *common.Cluster, checkFunc machineNetworkCheckFunc) (bool, bool) { if !IsMachineCidrAvailable(cluster) { - return false + return false, false } - machineNetwork, err := parseMachineNetworks(cluster.MachineNetworks) + machineNetworksV4, machineNetworksV6, err := parseMachineNetworks(cluster.MachineNetworks) if err != nil { log.WithError(err).Warn("Failed to parse machine networks") - return false + return false, false } - return lo.Reduce(machineNetwork, checkFunc, false) + return lo.Reduce(machineNetworksV4, checkFunc, false), lo.Reduce(machineNetworksV6, checkFunc, false) } type IPSet map[strfmt.IPv4]struct{} diff --git a/internal/network/machine_network_cidr_test.go b/internal/network/machine_network_cidr_test.go index 45d2cddf06..4946e470b2 100644 --- a/internal/network/machine_network_cidr_test.go +++ b/internal/network/machine_network_cidr_test.go @@ -497,7 +497,7 @@ var _ = Describe("inventory", func() { }) - Context("IsHostInPrimaryMachineNetCidr", func() { + Context("IsHostInMachineNetCidrs", func() { var log logrus.FieldLogger @@ -506,22 +506,23 @@ var _ = Describe("inventory", func() { }) DescribeTable( - "IsHostInPrimaryMachineNetCidr", + "IsHostInMachineNetCidrs", func(nics []*models.Interface, machineNetworks []*models.MachineNetwork, expectedResult bool) { cluster := createCluster("", "", createInventory(nics...)) cluster.MachineNetworks = machineNetworks - res := IsHostInPrimaryMachineNetCidr(log, cluster, cluster.Hosts[0]) + res := IsHostInMachineNetCidrs(log, cluster, cluster.Hosts[0]) Expect(res).To(Equal(expectedResult)) }, Entry("MachineNetworks is empty", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{}, false), Entry("MachineNetworks is malformed", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{{Cidr: 
"a.b.c.d"}}, false), Entry("Interfaces is empty", []*models.Interface{}, []*models.MachineNetwork{{Cidr: "a.b.c.d"}}, false), Entry("Interface IP is malformed", []*models.Interface{createInterface("a.b.c.d/24")}, []*models.MachineNetwork{{Cidr: "1.2.3.4/24"}}, false), - Entry("Host belongs to all machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24"), addIPv6Addresses(createInterface(), "2001:db8::1/48")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::/48"}}, true), - Entry("Host doesn't belong to all machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::a1/120"}}, false), + Entry("Host belongs to dual machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24"), addIPv6Addresses(createInterface(), "2001:db8::1/48")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::/48"}}, true), + Entry("Host doesn't belong to dual machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::a1/120"}}, false), + Entry("Host belongs to one of dual machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24"), addIPv6Addresses(createInterface(), "2001:db8::1/48")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::/48"}, {Cidr: "1.2.4.0/24"}, {Cidr: "2001:db81::/48"}}, true), ) }) - Context("IsInterfaceInPrimaryMachineNetCidr", func() { + Context("IsInterfaceInMachineNetCidr", func() { var log logrus.FieldLogger @@ -530,11 +531,11 @@ var _ = Describe("inventory", func() { }) DescribeTable( - "IsInterfaceInPrimaryMachineNetCidr", + "IsInterfaceInMachineNetCidr", func(nic *models.Interface, machineNetworks []*models.MachineNetwork, expectedResult bool) { cluster := createCluster("", "", createInventory(nic)) cluster.MachineNetworks = machineNetworks - res := IsInterfaceInPrimaryMachineNetCidr(log, cluster, nic) + res := 
IsInterfaceInMachineNetCidr(log, cluster, nic) Expect(res).To(Equal(expectedResult)) }, Entry("MachineNetworks is empty", createInterface("1.2.3.4/24"), []*models.MachineNetwork{}, false), From 2ca8e8de3d7a8d7252001cf62ccd9c31fca0b6f7 Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Wed, 28 Feb 2024 17:59:04 +1300 Subject: [PATCH 12/15] Allow multiple MachineNetworks in dual stack Multiple MachineNetworks in the same address family and IPv6-primary dual-stack clusters are a thing, so relax the dual-stack validation requirements for machine networks to allow them. --- internal/bminventory/inventory_test.go | 65 +++++++++---------- .../cluster/validations/validation_test.go | 9 +-- internal/network/dual_stack_validations.go | 24 ++++--- 3 files changed, 49 insertions(+), 49 deletions(-) diff --git a/internal/bminventory/inventory_test.go b/internal/bminventory/inventory_test.go index b14e14fd5a..fcc9dfc80e 100644 --- a/internal/bminventory/inventory_test.go +++ b/internal/bminventory/inventory_test.go @@ -4350,33 +4350,6 @@ var _ = Describe("cluster", func() { actual := reply.(*installer.V2UpdateClusterCreated) Expect(len(actual.Payload.MachineNetworks)).To(Equal(2)) }) - It("Wrong order of machine network CIDRs in non dhcp for dual-stack", func() { - mockClusterUpdatability(1) - mockSuccess(1) - - apiVip := "10.11.12.15" - ingressVip := "10.11.12.16" - reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ - ClusterID: clusterID, - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - ClusterNetworks: []*models.ClusterNetwork{{Cidr: "10.128.0.0/14", HostPrefix: 23}, {Cidr: "fd01::/48", HostPrefix: 64}}, - MachineNetworks: []*models.MachineNetwork{{Cidr: "10.11.0.0/16"}, {Cidr: "fd2e:6f44:5dd8:c956::/120"}}, - }, - }) - Expect(reply).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterCreated())) - - reply = bm.V2UpdateCluster(ctx, 
installer.V2UpdateClusterParams{ - ClusterID: clusterID, - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - MachineNetworks: []*models.MachineNetwork{{Cidr: "fd2e:6f44:5dd8:c956::/120"}, {Cidr: "10.12.0.0/16"}}, - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "First machine network has to be IPv4 subnet") - }) It("API VIP in wrong subnet for dual-stack", func() { apiVip := "10.11.12.15" ingressVip := "10.11.12.16" @@ -17928,9 +17901,36 @@ var _ = Describe("Dual-stack cluster", func() { reply := bm.V2RegisterCluster(ctx, params) verifyApiErrorString(reply, http.StatusBadRequest, errStr) }) - It("v6-first in machine networks rejected", func() { - errStr := "First machine network has to be IPv4 subnet" - params.NewClusterParams.MachineNetworks = TestDualStackNetworkingWrongOrder.MachineNetworks + }) + + Context("Cluster with two networks of same stack", func() { + It("only v4 in cluster networks rejected", func() { + errStr := "Second cluster network has to be IPv6 subnet" + params.NewClusterParams.ClusterNetworks = []*models.ClusterNetwork{ + {Cidr: "1.3.0.0/16", HostPrefix: 16}, + {Cidr: "1.4.0.0/16", HostPrefix: 16}, + } + params.NewClusterParams.ServiceNetworks = common.TestDualStackNetworking.ServiceNetworks + reply := bm.V2RegisterCluster(ctx, params) + verifyApiErrorString(reply, http.StatusBadRequest, errStr) + }) + It("only v4 in service networks rejected", func() { + errStr := "Second service network has to be IPv6 subnet" + params.NewClusterParams.ClusterNetworks = common.TestDualStackNetworking.ClusterNetworks + params.NewClusterParams.ServiceNetworks = []*models.ServiceNetwork{ + {Cidr: "1.2.5.0/24"}, + {Cidr: "1.2.6.0/24"}, + } + reply := bm.V2RegisterCluster(ctx, params) + verifyApiErrorString(reply, http.StatusBadRequest, errStr) + }) + It("only v4 in machine networks rejected", func() { + errStr := "One machine 
network has to be IPv6 subnet" + params.NewClusterParams.ClusterNetworks = common.TestDualStackNetworking.ClusterNetworks + params.NewClusterParams.MachineNetworks = []*models.MachineNetwork{ + {Cidr: "1.2.3.0/24"}, + {Cidr: "1.2.4.0/24"}, + } reply := bm.V2RegisterCluster(ctx, params) verifyApiErrorString(reply, http.StatusBadRequest, errStr) }) @@ -17994,11 +17994,6 @@ var _ = Describe("Dual-stack cluster", func() { reply := bm.V2UpdateCluster(ctx, params) verifyApiErrorString(reply, http.StatusBadRequest, "First service network has to be IPv4 subnet") }) - It("v6-first in machine networks rejected", func() { - params.ClusterUpdateParams.MachineNetworks = TestDualStackNetworkingWrongOrder.MachineNetworks - reply := bm.V2UpdateCluster(ctx, params) - verifyApiErrorString(reply, http.StatusBadRequest, "First machine network has to be IPv4 subnet") - }) }) Context("Cluster with single network when two required", func() { diff --git a/internal/cluster/validations/validation_test.go b/internal/cluster/validations/validation_test.go index 5724680254..f67211d0c9 100644 --- a/internal/cluster/validations/validation_test.go +++ b/internal/cluster/validations/validation_test.go @@ -657,19 +657,16 @@ var _ = Describe("Machine Network amount and order", func() { valid: true, }, { - // Invalid because violates the "IPv4 subnet as the first one" constraint element: []*models.MachineNetwork{{Cidr: "1002:db8::/119"}, {Cidr: "1.2.5.0/24"}}, - valid: false, + valid: true, }, { - // Invalid because violates the "exactly 2 networks" constraint element: []*models.MachineNetwork{{Cidr: "1.2.5.0/24"}, {Cidr: "1002:db8::/119"}, {Cidr: "1.2.6.0/24"}, {Cidr: "1.2.7.0/24"}}, - valid: false, + valid: true, }, { - // Invalid because violates the "exactly 2 networks" constraint element: []*models.MachineNetwork{{Cidr: "1002:db8::/119"}, {Cidr: "1.2.5.0/24"}, {Cidr: "1.2.6.0/24"}, {Cidr: "1.2.7.0/24"}}, - valid: false, + valid: true, }, } for _, test := range tests { diff --git 
a/internal/network/dual_stack_validations.go b/internal/network/dual_stack_validations.go index 114476dbe0..ba1bd8bb1b 100644 --- a/internal/network/dual_stack_validations.go +++ b/internal/network/dual_stack_validations.go @@ -7,21 +7,29 @@ import ( ) -// Verify if the constrains for dual-stack machine networks are met: -// - there are exactly two machine networks -// - the first one is IPv4 subnet -// - the second one is IPv6 subnet +// Verify if the constraints for dual-stack machine networks are met: +// - there are at least two machine networks +// - at least one is IPv4 subnet +// - at least one is IPv6 subnet func VerifyMachineNetworksDualStack(networks []*models.MachineNetwork, isDualStack bool) error { if !isDualStack { return nil } - if len(networks) != 2 { - return errors.Errorf("Expected 2 machine networks, found %d", len(networks)) + if len(networks) < 2 { + return errors.Errorf("Expected at least 2 machine networks, found %d", len(networks)) } - if !IsIPV4CIDR(string(networks[0].Cidr)) { - return errors.Errorf("First machine network has to be IPv4 subnet") + var haveIPv4, haveIPv6 bool + for _, net := range networks { + if IsIPV4CIDR(string(net.Cidr)) { + haveIPv4 = true + } else if IsIPv6CIDR(string(net.Cidr)) { + haveIPv6 = true + } } - if !IsIPv6CIDR(string(networks[1].Cidr)) { - return errors.Errorf("Second machine network has to be IPv6 subnet") + if !haveIPv4 { + return errors.Errorf("One machine network has to be IPv4 subnet") + } + if !haveIPv6 { + return errors.Errorf("One machine network has to be IPv6 subnet") } return nil From 102891f2aa24b162fa5212e6051c6e6a67bd92f0 Mon Sep 17 00:00:00 2001 From: Zane Bitter <zbitter@redhat.com> Date: Wed, 28 Feb 2024 09:46:08 +1300 Subject: [PATCH 13/15] Revert "Forbid multiple machine networks in single-stack clusters" Allow users to specify multiple machine networks of the same address family. This is a documented and supported feature of OpenShift. This reverts commit 873dd81bb499c9db0e868d542b9236bdd6a94079.
--- internal/bminventory/inventory_test.go | 46 +++++++++------------ internal/cluster/validations/validations.go | 5 --- 2 files changed, 19 insertions(+), 32 deletions(-) diff --git a/internal/bminventory/inventory_test.go b/internal/bminventory/inventory_test.go index fcc9dfc80e..fd9c4580e4 100644 --- a/internal/bminventory/inventory_test.go +++ b/internal/bminventory/inventory_test.go @@ -4830,9 +4830,9 @@ var _ = Describe("cluster", func() { Context("Networks", func() { var ( - clusterNetworks = common.TestIPv4Networking.ClusterNetworks - serviceNetworks = common.TestIPv4Networking.ServiceNetworks - machineNetworks = common.TestIPv4Networking.MachineNetworks + clusterNetworks = []*models.ClusterNetwork{{Cidr: "1.1.0.0/24", HostPrefix: 24}, {Cidr: "2.2.0.0/24", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "3.3.0.0/24"}, {Cidr: "4.4.0.0/24"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "5.5.0.0/24"}, {Cidr: "6.6.0.0/24"}} ) setNetworksClusterID := func(clusterID strfmt.UUID, @@ -5021,22 +5021,16 @@ var _ = Describe("cluster", func() { verifyApiErrorString(reply, http.StatusBadRequest, "Machine network CIDR '': Failed to parse CIDR '': invalid CIDR address: ") }) - It("Multiple machine networks illegal for single-stack", func() { - reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ - ClusterID: clusterID, - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - MachineNetworks: []*models.MachineNetwork{{Cidr: "15.15.0.0/21"}, {Cidr: "16.16.0.0/21"}}, - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "Single-stack cluster cannot contain multiple Machine Networks") - }) + It("Override networks - additional subnet", func() { + clusterNetworks = []*models.ClusterNetwork{{Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "13.13.0.0/21"}, {Cidr: "14.14.0.0/21"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "15.15.0.0/21"}, 
{Cidr: "16.16.0.0/21"}} - It("Override networks - additional cluster subnet", func() { mockSuccess(1) reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ - ClusterNetworks: []*models.ClusterNetwork{{Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, + ClusterNetworks: clusterNetworks, ServiceNetworks: serviceNetworks, MachineNetworks: machineNetworks, VipDhcpAllocation: swag.Bool(true), @@ -5045,19 +5039,20 @@ var _ = Describe("cluster", func() { Expect(reply).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterCreated())) actual := reply.(*installer.V2UpdateClusterCreated) - validateNetworkConfiguration(actual.Payload, - &[]*models.ClusterNetwork{{Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, - &serviceNetworks, - &machineNetworks) + validateNetworkConfiguration(actual.Payload, &clusterNetworks, &serviceNetworks, &machineNetworks) validateHostsRequestedHostname(actual.Payload) }) - It("Override networks - 2 additional cluster subnets", func() { + It("Override networks - 2 additional subnets", func() { + clusterNetworks = []*models.ClusterNetwork{{Cidr: "10.10.0.0/21", HostPrefix: 24}, {Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "13.13.0.0/21"}, {Cidr: "14.14.0.0/21"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "15.15.0.0/21"}, {Cidr: "16.16.0.0/21"}} + mockSuccess(1) reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ - ClusterNetworks: []*models.ClusterNetwork{{Cidr: "10.10.0.0/21", HostPrefix: 24}, {Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, + ClusterNetworks: clusterNetworks, ServiceNetworks: serviceNetworks, MachineNetworks: machineNetworks, VipDhcpAllocation: swag.Bool(true), @@ -5066,10 
+5061,7 @@ var _ = Describe("cluster", func() { Expect(reply).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterCreated())) actual := reply.(*installer.V2UpdateClusterCreated) - validateNetworkConfiguration(actual.Payload, - &[]*models.ClusterNetwork{{Cidr: "10.10.0.0/21", HostPrefix: 24}, {Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, - &serviceNetworks, - &machineNetworks) + validateNetworkConfiguration(actual.Payload, &clusterNetworks, &serviceNetworks, &machineNetworks) validateHostsRequestedHostname(actual.Payload) }) @@ -15323,9 +15315,9 @@ location = "%s" Context("Networking", func() { var ( - clusterNetworks = common.TestIPv4Networking.ClusterNetworks - serviceNetworks = common.TestIPv4Networking.ServiceNetworks - machineNetworks = common.TestIPv4Networking.MachineNetworks + clusterNetworks = []*models.ClusterNetwork{{Cidr: "1.1.1.0/24", HostPrefix: 24}, {Cidr: "2.2.2.0/24", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "3.3.3.0/24"}, {Cidr: "4.4.4.0/24"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "5.5.5.0/24"}, {Cidr: "6.6.6.0/24"}, {Cidr: "7.7.7.0/24"}} ) registerCluster := func() *models.Cluster { diff --git a/internal/cluster/validations/validations.go b/internal/cluster/validations/validations.go index f3bfb6f97a..b0f53222f1 100644 --- a/internal/cluster/validations/validations.go +++ b/internal/cluster/validations/validations.go @@ -605,11 +605,6 @@ func ValidateDualStackNetworks(clusterParams interface{}, alreadyDualStack bool) return err } } - } else { - if len(machineNetworks) > 1 { - err := errors.Errorf("Single-stack cluster cannot contain multiple Machine Networks") - return err - } } return nil } From 5f5598e06ba3452209a83ef625fb8b20e99519dc Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 11 Mar 2024 13:18:53 +1300 Subject: [PATCH 14/15] Find BootMACAddress from any machine network Don't restrict ourselves to the first machine network when looking for an interface on a 
machine network to set the BootMACAddress. --- internal/provider/baremetal/installConfig.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/internal/provider/baremetal/installConfig.go b/internal/provider/baremetal/installConfig.go index b60b5e2a40..241d1fc1d5 100644 --- a/internal/provider/baremetal/installConfig.go +++ b/internal/provider/baremetal/installConfig.go @@ -53,22 +53,24 @@ func (p *baremetalProvider) AddPlatformToInstallConfig( return err } + InterfaceSearch: for _, iface := range inventory.Interfaces { - // We are looking for the NIC that matches the first Machine Network configured - // for the cluster. This is to ensure that BootMACAddress belongs to the NIC that - // is really used and not to any fake interface even if this interface has IPs. + // We are looking for a NIC that matches any of the Machine Networks configured + // for the cluster. This is to ensure that BootMACAddress belongs to a NIC that + // is really used and not to any fake interface even if this interface has IPs. ipAddressses := append(iface.IPV4Addresses, iface.IPV6Addresses...) for _, ip := range ipAddressses { - overlap, _ := network.NetworksOverlap(ip, string(cluster.MachineNetworks[0].Cidr)) - if overlap { - hosts[yamlHostIdx].BootMACAddress = iface.MacAddress - break + for _, machineNetwork := range cluster.MachineNetworks { + overlap, _ := network.NetworksOverlap(ip, string(machineNetwork.Cidr)) + if overlap { + hosts[yamlHostIdx].BootMACAddress = iface.MacAddress + break InterfaceSearch + } } } } if hosts[yamlHostIdx].BootMACAddress == "" { - err = errors.Errorf("Failed to find a network interface matching machine network (%s) for host %s", - cluster.MachineNetworks[0].Cidr, + err = errors.Errorf("Failed to find a network interface matching machine networks for host %s", hostutil.GetHostnameForMsg(host), ) p.Log.Error(err) From b9ba27b9569e9fe1aaa742754370d3745cebde7e Mon Sep 17 00:00:00 2001 From: Zane Bitter Date: Mon, 4 Mar 2024 14:19:28 +1300 Subject: [PATCH 15/15] Update comment on L2 connectivity checks and MachineNetworks OpenShift has ~always supported having machines in multiple machineNetworks, so update the TODO comment to reflect that accounting for this is already 
something we need to do to fully support non-UserManagedNetworking clusters. (UserManagedNetworking clusters use only the L3 connectivity check.) See https://issues.redhat.com/browse/OCPBUGS-30730 for more details. --- internal/host/validator.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/host/validator.go b/internal/host/validator.go index eea2a0de7a..65a6ab6c3b 100644 --- a/internal/host/validator.go +++ b/internal/host/validator.go @@ -675,8 +675,9 @@ func (v *validator) belongsToL2MajorityGroup(c *validationContext, majorityGroup return ValidationFailure } - // TODO(mko) This rule should be revised as soon as OCP supports multiple machineNetwork - // entries using the same IP stack. + // TODO(mko) This rule must be revised to support multiple machineNetwork + // entries using the same IP stack on clusters without + // user-managed networking. (OCPBUGS-30730) areNetworksEqual := func(ipnet1, ipnet2 *net.IPNet) bool { return ipnet1.IP.Equal(ipnet2.IP) && bytes.Equal(ipnet1.Mask, ipnet2.Mask) }