diff --git a/internal/bminventory/inventory.go b/internal/bminventory/inventory.go index ec0a836474e..cdbf7f963a9 100644 --- a/internal/bminventory/inventory.go +++ b/internal/bminventory/inventory.go @@ -2235,48 +2235,39 @@ func (b *bareMetalInventory) updateNonDhcpNetworkParams(cluster *common.Cluster, return common.NewApiError(http.StatusBadRequest, err) } if interactivity == Interactive && (params.ClusterUpdateParams.APIVips != nil || params.ClusterUpdateParams.IngressVips != nil) { - var primaryMachineNetworkCidr string - var secondaryMachineNetworkCidr string - matchRequired := network.GetApiVipById(&targetConfiguration, 0) != "" || network.GetIngressVipById(&targetConfiguration, 0) != "" // We want to calculate Machine Network based on the API/Ingress VIPs only in case of the // single-stack cluster. Auto calculation is not supported for dual-stack in which we // require that user explicitly provides all the Machine Networks. if reqDualStack { - if params.ClusterUpdateParams.MachineNetworks != nil { - primaryMachineNetworkCidr = string(params.ClusterUpdateParams.MachineNetworks[0].Cidr) - } else { - primaryMachineNetworkCidr = network.GetPrimaryMachineCidrForUserManagedNetwork(cluster, log) - } - err = network.VerifyMachineNetworksDualStack(targetConfiguration.MachineNetworks, reqDualStack) if err != nil { log.WithError(err).Warnf("Verify dual-stack machine networks") return common.NewApiError(http.StatusBadRequest, err) } - secondaryMachineNetworkCidr, err = network.GetSecondaryMachineCidr(&targetConfiguration) - if err != nil { - return common.NewApiError(http.StatusBadRequest, err) - } - if err = network.VerifyVips(cluster.Hosts, primaryMachineNetworkCidr, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { + if err = network.VerifyVips(cluster.Hosts, targetConfiguration.MachineNetworks, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { log.WithError(err).Warnf("Verify VIPs") return common.NewApiError(http.StatusBadRequest, err) } if len(targetConfiguration.IngressVips) == 2 && len(targetConfiguration.APIVips) == 2 { // in case there's a second set of VIPs - if err = network.VerifyVips(cluster.Hosts, secondaryMachineNetworkCidr, network.GetApiVipById(&targetConfiguration, 1), network.GetIngressVipById(&targetConfiguration, 1), log); err != nil { + if err = network.VerifyVips(cluster.Hosts, targetConfiguration.MachineNetworks, network.GetApiVipById(&targetConfiguration, 1), network.GetIngressVipById(&targetConfiguration, 1), log); err != nil { log.WithError(err).Warnf("Verify VIPs") return common.NewApiError(http.StatusBadRequest, err) } } } else { - primaryMachineNetworkCidr, err = network.CalculateMachineNetworkCIDR(network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), cluster.Hosts, matchRequired) + // TODO: OCPBUGS-30730 this function needs to be changed to allow + // returning multiple MachineNetworks, so the API VIP and + // Ingress VIP can be in different L2 networks. 
+ primaryMachineNetworkCidr, err := network.CalculateMachineNetworkCIDR(network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), cluster.Hosts, matchRequired) if err != nil { return common.NewApiError(http.StatusBadRequest, errors.Wrap(err, "Calculate machine network CIDR")) } + calculatedMachineNetworks := []*models.MachineNetwork{} if primaryMachineNetworkCidr != "" { // We set the machine networks in the ClusterUpdateParams, so they will be viewed as part of the request // to update the cluster @@ -2285,8 +2276,9 @@ func (b *bareMetalInventory) updateNonDhcpNetworkParams(cluster *common.Cluster, // returned with an error. Therefore, params.ClusterUpdateParams.MachineNetworks is empty here before // the assignment below. params.ClusterUpdateParams.MachineNetworks = []*models.MachineNetwork{{Cidr: models.Subnet(primaryMachineNetworkCidr)}} + calculatedMachineNetworks = params.ClusterUpdateParams.MachineNetworks } - if err = network.VerifyVips(cluster.Hosts, primaryMachineNetworkCidr, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { + if err = network.VerifyVips(cluster.Hosts, calculatedMachineNetworks, network.GetApiVipById(&targetConfiguration, 0), network.GetIngressVipById(&targetConfiguration, 0), log); err != nil { log.WithError(err).Warnf("Verify VIPs") return common.NewApiError(http.StatusBadRequest, err) } @@ -2548,25 +2540,8 @@ func (b *bareMetalInventory) updateNetworks(db *gorm.DB, params installer.V2Upda if params.ClusterUpdateParams.ClusterNetworks != nil || params.ClusterUpdateParams.ServiceNetworks != nil || params.ClusterUpdateParams.MachineNetworks != nil { - // TODO MGMT-7587: Support any number of subnets - // Assumes that the number of cluster networks equal to the number of service networks - for index := range cluster.ClusterNetworks { - machineNetworkCidr := "" - if len(cluster.MachineNetworks) > index { - machineNetworkCidr = string(cluster.MachineNetworks[index].Cidr) - } - - serviceNetworkCidr := "" - if len(cluster.ServiceNetworks) > index { - serviceNetworkCidr = string(cluster.ServiceNetworks[index].Cidr) - } - - if err = network.VerifyClusterCIDRsNotOverlap(machineNetworkCidr, - string(cluster.ClusterNetworks[index].Cidr), - serviceNetworkCidr, - userManagedNetworking); err != nil { - return common.NewApiError(http.StatusBadRequest, err) - } + if err := network.VerifyNoNetworkCidrOverlaps(cluster.ClusterNetworks, cluster.MachineNetworks, cluster.ServiceNetworks); err != nil { + return common.NewApiError(http.StatusBadRequest, err) } } if updated { diff --git a/internal/bminventory/inventory_test.go b/internal/bminventory/inventory_test.go index 5e555f67800..fd9c4580e45 100644 --- a/internal/bminventory/inventory_test.go +++ b/internal/bminventory/inventory_test.go @@ -4208,7 +4208,7 @@ var _ = Describe("cluster", func() { }, }) verifyApiErrorString(reply, http.StatusBadRequest, - "ingress-vip <1.2.3.20> does not belong to machine-network-cidr <10.11.0.0/16>") + "ingress-vip <1.2.3.20> does not belong to any Machine Network") }) It("Same api and ingress", func() { apiVip := "10.11.12.15" @@ -4350,33 +4350,6 @@ var _ = Describe("cluster", func() { actual := reply.(*installer.V2UpdateClusterCreated) Expect(len(actual.Payload.MachineNetworks)).To(Equal(2)) }) - It("Wrong order of machine network CIDRs in non dhcp for dual-stack", func() { - mockClusterUpdatability(1) - mockSuccess(1) - - apiVip := "10.11.12.15" - ingressVip := "10.11.12.16" - reply := 
bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ - ClusterID: clusterID, - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - ClusterNetworks: []*models.ClusterNetwork{{Cidr: "10.128.0.0/14", HostPrefix: 23}, {Cidr: "fd01::/48", HostPrefix: 64}}, - MachineNetworks: []*models.MachineNetwork{{Cidr: "10.11.0.0/16"}, {Cidr: "fd2e:6f44:5dd8:c956::/120"}}, - }, - }) - Expect(reply).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterCreated())) - - reply = bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ - ClusterID: clusterID, - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - MachineNetworks: []*models.MachineNetwork{{Cidr: "fd2e:6f44:5dd8:c956::/120"}, {Cidr: "10.12.0.0/16"}}, - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "First machine network has to be IPv4 subnet") - }) It("API VIP in wrong subnet for dual-stack", func() { apiVip := "10.11.12.15" ingressVip := "10.11.12.16" @@ -4389,7 +4362,7 @@ var _ = Describe("cluster", func() { MachineNetworks: []*models.MachineNetwork{{Cidr: "10.12.0.0/16"}, {Cidr: "fd2e:6f44:5dd8:c956::/120"}}, }, }) - verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to machine-network-cidr <10.12.0.0/16>") + verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to any Machine Network") }) }) @@ -4857,9 +4830,9 @@ var _ = Describe("cluster", func() { Context("Networks", func() { var ( - clusterNetworks = common.TestIPv4Networking.ClusterNetworks - serviceNetworks = common.TestIPv4Networking.ServiceNetworks - machineNetworks = common.TestIPv4Networking.MachineNetworks + clusterNetworks = []*models.ClusterNetwork{{Cidr: "1.1.0.0/24", HostPrefix: 24}, {Cidr: "2.2.0.0/24", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "3.3.0.0/24"}, {Cidr: "4.4.0.0/24"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "5.5.0.0/24"}, {Cidr: "6.6.0.0/24"}} ) setNetworksClusterID := func(clusterID strfmt.UUID, @@ -5048,22 +5021,16 @@ var _ = Describe("cluster", func() { verifyApiErrorString(reply, http.StatusBadRequest, "Machine network CIDR '': Failed to parse CIDR '': invalid CIDR address: ") }) - It("Multiple machine networks illegal for single-stack", func() { - reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ - ClusterID: clusterID, - ClusterUpdateParams: &models.V2ClusterUpdateParams{ - MachineNetworks: []*models.MachineNetwork{{Cidr: "15.15.0.0/21"}, {Cidr: "16.16.0.0/21"}}, - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "Single-stack cluster cannot contain multiple Machine Networks") - }) + It("Override networks - additional subnet", func() { + clusterNetworks = []*models.ClusterNetwork{{Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "13.13.0.0/21"}, {Cidr: "14.14.0.0/21"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "15.15.0.0/21"}, {Cidr: "16.16.0.0/21"}} - It("Override networks - additional cluster subnet", func() { mockSuccess(1) reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ - ClusterNetworks: []*models.ClusterNetwork{{Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", 
HostPrefix: 24}}, + ClusterNetworks: clusterNetworks, ServiceNetworks: serviceNetworks, MachineNetworks: machineNetworks, VipDhcpAllocation: swag.Bool(true), @@ -5072,19 +5039,20 @@ var _ = Describe("cluster", func() { Expect(reply).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterCreated())) actual := reply.(*installer.V2UpdateClusterCreated) - validateNetworkConfiguration(actual.Payload, - &[]*models.ClusterNetwork{{Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, - &serviceNetworks, - &machineNetworks) + validateNetworkConfiguration(actual.Payload, &clusterNetworks, &serviceNetworks, &machineNetworks) validateHostsRequestedHostname(actual.Payload) }) - It("Override networks - 2 additional cluster subnets", func() { + It("Override networks - 2 additional subnets", func() { + clusterNetworks = []*models.ClusterNetwork{{Cidr: "10.10.0.0/21", HostPrefix: 24}, {Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "13.13.0.0/21"}, {Cidr: "14.14.0.0/21"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "15.15.0.0/21"}, {Cidr: "16.16.0.0/21"}} + mockSuccess(1) reply := bm.V2UpdateCluster(ctx, installer.V2UpdateClusterParams{ ClusterID: clusterID, ClusterUpdateParams: &models.V2ClusterUpdateParams{ - ClusterNetworks: []*models.ClusterNetwork{{Cidr: "10.10.0.0/21", HostPrefix: 24}, {Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, + ClusterNetworks: clusterNetworks, ServiceNetworks: serviceNetworks, MachineNetworks: machineNetworks, VipDhcpAllocation: swag.Bool(true), @@ -5093,10 +5061,7 @@ var _ = Describe("cluster", func() { Expect(reply).To(BeAssignableToTypeOf(installer.NewV2UpdateClusterCreated())) actual := reply.(*installer.V2UpdateClusterCreated) - validateNetworkConfiguration(actual.Payload, - &[]*models.ClusterNetwork{{Cidr: "10.10.0.0/21", HostPrefix: 24}, {Cidr: "11.11.0.0/21", HostPrefix: 24}, {Cidr: "12.12.0.0/21", HostPrefix: 24}}, - &serviceNetworks, - &machineNetworks) + validateNetworkConfiguration(actual.Payload, &clusterNetworks, &serviceNetworks, &machineNetworks) validateHostsRequestedHostname(actual.Payload) }) @@ -14874,7 +14839,7 @@ location = "%s" VipDhcpAllocation: swag.Bool(false), }, }) - verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to machine-network-cidr <1.2.3.0/24>") + verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <10.11.12.15> does not belong to any Machine Network") }) It("Ingress VIP not in Machine Network", func() { apiVip := "1.2.3.5" @@ -14890,7 +14855,7 @@ location = "%s" VipDhcpAllocation: swag.Bool(false), }, }) - verifyApiErrorString(reply, http.StatusBadRequest, "ingress-vip <10.11.12.16> does not belong to machine-network-cidr <1.2.3.0/24>") + verifyApiErrorString(reply, http.StatusBadRequest, "ingress-vip <10.11.12.16> does not belong to any Machine Network") }) It("API VIP and Ingress VIP with empty Machine Networks", func() { apiVip := "10.11.12.15" @@ -14907,40 +14872,6 @@ location = "%s" }) verifyApiErrorString(reply, http.StatusBadRequest, "Dual-stack cluster cannot be created with empty Machine Networks") }) - - It("API VIP from IPv6 Machine Network", func() { - apiVip := "1001:db8::64" - ingressVip := "1.2.3.6" - - reply := bm.V2RegisterCluster(ctx, installer.V2RegisterClusterParams{ - NewClusterParams: &models.ClusterCreateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: 
models.IP(ingressVip)}}, - ClusterNetworks: common.TestDualStackNetworking.ClusterNetworks, - MachineNetworks: common.TestDualStackNetworking.MachineNetworks, - ServiceNetworks: common.TestDualStackNetworking.ServiceNetworks, - VipDhcpAllocation: swag.Bool(false), - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "api-vip <1001:db8::64> does not belong to machine-network-cidr <1.2.3.0/24>") - }) - - It("Ingress VIP from IPv6 Machine Network", func() { - apiVip := "1.2.3.5" - ingressVip := "1001:db8::65" - - reply := bm.V2RegisterCluster(ctx, installer.V2RegisterClusterParams{ - NewClusterParams: &models.ClusterCreateParams{ - APIVips: []*models.APIVip{{IP: models.IP(apiVip)}}, - IngressVips: []*models.IngressVip{{IP: models.IP(ingressVip)}}, - ClusterNetworks: common.TestDualStackNetworking.ClusterNetworks, - MachineNetworks: common.TestDualStackNetworking.MachineNetworks, - ServiceNetworks: common.TestDualStackNetworking.ServiceNetworks, - VipDhcpAllocation: swag.Bool(false), - }, - }) - verifyApiErrorString(reply, http.StatusBadRequest, "ingress-vip <1001:db8::65> does not belong to machine-network-cidr <1.2.3.0/24>") - }) }) Context("V2 Update cluster", func() { @@ -15384,9 +15315,9 @@ location = "%s" Context("Networking", func() { var ( - clusterNetworks = common.TestIPv4Networking.ClusterNetworks - serviceNetworks = common.TestIPv4Networking.ServiceNetworks - machineNetworks = common.TestIPv4Networking.MachineNetworks + clusterNetworks = []*models.ClusterNetwork{{Cidr: "1.1.1.0/24", HostPrefix: 24}, {Cidr: "2.2.2.0/24", HostPrefix: 24}} + serviceNetworks = []*models.ServiceNetwork{{Cidr: "3.3.3.0/24"}, {Cidr: "4.4.4.0/24"}} + machineNetworks = []*models.MachineNetwork{{Cidr: "5.5.5.0/24"}, {Cidr: "6.6.6.0/24"}, {Cidr: "7.7.7.0/24"}} ) registerCluster := func() *models.Cluster { @@ -17962,9 +17893,36 @@ var _ = Describe("Dual-stack cluster", func() { reply := bm.V2RegisterCluster(ctx, params) verifyApiErrorString(reply, http.StatusBadRequest, errStr) }) - It("v6-first in machine networks rejected", func() { - errStr := "First machine network has to be IPv4 subnet" - params.NewClusterParams.MachineNetworks = TestDualStackNetworkingWrongOrder.MachineNetworks + }) + + Context("Cluster with two networks of same stack", func() { + It("only v4 in cluster networks rejected", func() { + errStr := "Second cluster network has to be IPv6 subnet" + params.NewClusterParams.ClusterNetworks = []*models.ClusterNetwork{ + {Cidr: "1.3.0.0/16", HostPrefix: 16}, + {Cidr: "1.4.0.0/16", HostPrefix: 16}, + } + params.NewClusterParams.ServiceNetworks = common.TestDualStackNetworking.ServiceNetworks + reply := bm.V2RegisterCluster(ctx, params) + verifyApiErrorString(reply, http.StatusBadRequest, errStr) + }) + It("only v4 in service networks rejected", func() { + errStr := "Second service network has to be IPv6 subnet" + params.NewClusterParams.ClusterNetworks = common.TestDualStackNetworking.ClusterNetworks + params.NewClusterParams.ServiceNetworks = []*models.ServiceNetwork{ + {Cidr: "1.2.5.0/24"}, + {Cidr: "1.2.6.0/24"}, + } + reply := bm.V2RegisterCluster(ctx, params) + verifyApiErrorString(reply, http.StatusBadRequest, errStr) + }) + It("only v4 in machine networks rejected", func() { + errStr := "One machine network has to be IPv6 subnet" + params.NewClusterParams.ClusterNetworks = common.TestDualStackNetworking.ClusterNetworks + params.NewClusterParams.MachineNetworks = []*models.MachineNetwork{ + {Cidr: "1.2.3.0/24"}, + {Cidr: "1.2.4.0/24"}, + } reply := bm.V2RegisterCluster(ctx, params) 
verifyApiErrorString(reply, http.StatusBadRequest, errStr) }) @@ -18028,11 +17986,6 @@ var _ = Describe("Dual-stack cluster", func() { reply := bm.V2UpdateCluster(ctx, params) verifyApiErrorString(reply, http.StatusBadRequest, "First service network has to be IPv4 subnet") }) - It("v6-first in machine networks rejected", func() { - params.ClusterUpdateParams.MachineNetworks = TestDualStackNetworkingWrongOrder.MachineNetworks - reply := bm.V2UpdateCluster(ctx, params) - verifyApiErrorString(reply, http.StatusBadRequest, "First machine network has to be IPv4 subnet") - }) }) Context("Cluster with single network when two required", func() { diff --git a/internal/cluster/transition_test.go b/internal/cluster/transition_test.go index 452b9a7fd2b..ecd62ebebbb 100644 --- a/internal/cluster/transition_test.go +++ b/internal/cluster/transition_test.go @@ -2295,7 +2295,7 @@ var _ = Describe("Refresh Cluster - Advanced networking validations", func() { SufficientMastersCount: {status: ValidationSuccess, messagePattern: "The cluster has the exact amount of dedicated control plane nodes."}, isClusterCidrDefined: {status: ValidationSuccess, messagePattern: "Cluster Network CIDR is defined"}, isServiceCidrDefined: {status: ValidationSuccess, messagePattern: "Service Network CIDR is defined"}, - noCidrOverlapping: {status: ValidationFailure, messagePattern: "MachineNetworkCIDR and ClusterNetworkCidr: CIDRS .* and .* overlap"}, + noCidrOverlapping: {status: ValidationFailure, messagePattern: "CIDRS .* and .* overlap \\(machine network and cluster network\\)"}, networkPrefixValid: {status: ValidationFailure, messagePattern: "Host prefix, now 0, must be a positive integer"}, isNetworkTypeValid: {status: ValidationSuccess, messagePattern: "The cluster has a valid network type"}, }), @@ -2334,7 +2334,7 @@ var _ = Describe("Refresh Cluster - Advanced networking validations", func() { SufficientMastersCount: {status: ValidationSuccess, messagePattern: "The cluster has the exact amount of dedicated control plane nodes."}, isClusterCidrDefined: {status: ValidationSuccess, messagePattern: "Cluster Network CIDR is defined"}, isServiceCidrDefined: {status: ValidationSuccess, messagePattern: "Service Network CIDR is defined"}, - noCidrOverlapping: {status: ValidationFailure, messagePattern: "MachineNetworkCIDR and ServiceNetworkCIDR: CIDRS .* and .* overlap"}, + noCidrOverlapping: {status: ValidationFailure, messagePattern: "CIDRS .* and .* overlap \\(machine network and service network\\)"}, networkPrefixValid: {status: ValidationFailure, messagePattern: "Host prefix, now 0, must be a positive integer"}, isNetworkTypeValid: {status: ValidationSuccess, messagePattern: "The cluster has a valid network type"}, }), @@ -2843,7 +2843,7 @@ var _ = Describe("Refresh Cluster - Advanced networking validations", func() { SufficientMastersCount: {status: ValidationSuccess, messagePattern: "The cluster has the exact amount of dedicated control plane nodes."}, isClusterCidrDefined: {status: ValidationSuccess, messagePattern: "Cluster Network CIDR is defined"}, isServiceCidrDefined: {status: ValidationSuccess, messagePattern: "Service Network CIDR is defined"}, - noCidrOverlapping: {status: ValidationFailure, messagePattern: "MachineNetworkCIDR and ServiceNetworkCIDR: CIDRS .* and .* overlap"}, + noCidrOverlapping: {status: ValidationFailure, messagePattern: "CIDRS .* and .* overlap \\(machine network and service network\\)"}, networkPrefixValid: {status: ValidationFailure, messagePattern: "Host prefix, now 0, must be a positive 
integer"}, isNetworkTypeValid: {status: ValidationSuccess, messagePattern: "The cluster has a valid network type"}, }), @@ -3455,9 +3455,9 @@ var _ = Describe("Refresh Cluster - With DHCP", func() { validationsChecker: makeJsonChecker(map[ValidationID]validationCheckResult{ IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine Network CIDR is defined"}, AreApiVipsDefined: {status: ValidationSuccess, messagePattern: "API virtual IPs are defined"}, - AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api vips <10.10.10.12> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreApiVipsValid: {status: ValidationFailure, messagePattern: "api-vip <10.10.10.12> does not belong to any Machine Network"}, AreIngressVipsDefined: {status: ValidationSuccess, messagePattern: "Ingress virtual IPs are defined"}, - AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress vips <10.10.10.13> does not belong to machine-network-cidr <%s>", string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreIngressVipsValid: {status: ValidationFailure, messagePattern: "ingress-vip <10.10.10.13> does not belong to any Machine Network"}, AllHostsAreReadyToInstall: {status: ValidationSuccess, messagePattern: "All hosts in the cluster are ready to install."}, IsDNSDomainDefined: {status: ValidationSuccess, messagePattern: "The base domain is defined"}, IsPullSecretSet: {status: ValidationSuccess, messagePattern: "The pull secret is set."}, @@ -3486,9 +3486,9 @@ var _ = Describe("Refresh Cluster - With DHCP", func() { IsMachineCidrDefined: {status: ValidationSuccess, messagePattern: "Machine Network CIDR is defined"}, IsMachineCidrEqualsToCalculatedCidr: {status: ValidationSuccess, messagePattern: "Cluster Machine CIDR is equivalent to the calculated CIDR"}, AreApiVipsDefined: {status: ValidationSuccess, messagePattern: "API virtual IPs are defined"}, - AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api vips <%s> is already in use in cidr %s", common.TestIPv4Networking.APIVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreApiVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("api-vip <%s> is already in use in cidr %s", common.TestIPv4Networking.APIVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, AreIngressVipsDefined: {status: ValidationSuccess, messagePattern: "Ingress virtual IPs are defined"}, - AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress vips <%s> is already in use in cidr %s", common.TestIPv4Networking.IngressVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, + AreIngressVipsValid: {status: ValidationFailure, messagePattern: fmt.Sprintf("ingress-vip <%s> is already in use in cidr %s", common.TestIPv4Networking.IngressVips[0].IP, string(common.TestIPv4Networking.MachineNetworks[0].Cidr))}, AllHostsAreReadyToInstall: {status: ValidationSuccess, messagePattern: "All hosts in the cluster are ready to install."}, IsDNSDomainDefined: {status: ValidationSuccess, messagePattern: "The base domain is defined"}, IsPullSecretSet: {status: ValidationSuccess, messagePattern: "The pull secret is set."}, diff --git a/internal/cluster/validations/validation_test.go b/internal/cluster/validations/validation_test.go index 57246802545..f67211d0c9c 100644 --- a/internal/cluster/validations/validation_test.go +++ 
b/internal/cluster/validations/validation_test.go @@ -657,19 +657,16 @@ var _ = Describe("Machine Network amount and order", func() { valid: true, }, { - // Invalid because violates the "IPv4 subnet as the first one" constraint element: []*models.MachineNetwork{{Cidr: "1002:db8::/119"}, {Cidr: "1.2.5.0/24"}}, - valid: false, + valid: true, }, { - // Invalid because violates the "exactly 2 networks" constraint element: []*models.MachineNetwork{{Cidr: "1.2.5.0/24"}, {Cidr: "1002:db8::/119"}, {Cidr: "1.2.6.0/24"}, {Cidr: "1.2.7.0/24"}}, - valid: false, + valid: true, }, { - // Invalid because violates the "exactly 2 networks" constraint element: []*models.MachineNetwork{{Cidr: "1002:db8::/119"}, {Cidr: "1.2.5.0/24"}, {Cidr: "1.2.6.0/24"}, {Cidr: "1.2.7.0/24"}}, - valid: false, + valid: true, }, } for _, test := range tests { diff --git a/internal/cluster/validations/validations.go b/internal/cluster/validations/validations.go index 7481481e6fd..b0f53222f14 100644 --- a/internal/cluster/validations/validations.go +++ b/internal/cluster/validations/validations.go @@ -501,7 +501,7 @@ func validateVIPAddresses(ipV6Supported bool, targetConfiguration common.Cluster } else { if len(targetConfiguration.MachineNetworks) > 0 { for i := range targetConfiguration.APIVips { // len of APIVips and IngressVips should be the same. asserted above. - err = network.VerifyVips(nil, string(targetConfiguration.MachineNetworks[i].Cidr), + err = network.VerifyVips(nil, targetConfiguration.MachineNetworks, string(targetConfiguration.APIVips[i].IP), string(targetConfiguration.IngressVips[i].IP), nil) if err != nil { multiErr = multierror.Append(multiErr, err) @@ -605,11 +605,6 @@ func ValidateDualStackNetworks(clusterParams interface{}, alreadyDualStack bool) return err } } - } else { - if len(machineNetworks) > 1 { - err := errors.Errorf("Single-stack cluster cannot contain multiple Machine Networks") - return err - } } return nil } diff --git a/internal/cluster/validator.go b/internal/cluster/validator.go index 5b55d1ba77f..6b3be069bc6 100644 --- a/internal/cluster/validator.go +++ b/internal/cluster/validator.go @@ -154,15 +154,17 @@ type VipsWrapper interface { IP(index int) string Verification(index int) *models.VipVerification GetVips() []string + Type() network.VipType } type ApiVipsWrapper struct { c *clusterPreprocessContext } -func (a *ApiVipsWrapper) Name() string { return "API" } -func (a *ApiVipsWrapper) Len() int { return len(a.c.cluster.APIVips) } -func (a *ApiVipsWrapper) IP(index int) string { return string(a.c.cluster.APIVips[index].IP) } +func (a *ApiVipsWrapper) Name() string { return "API" } +func (a *ApiVipsWrapper) Type() network.VipType { return network.VipTypeAPI } +func (a *ApiVipsWrapper) Len() int { return len(a.c.cluster.APIVips) } +func (a *ApiVipsWrapper) IP(index int) string { return string(a.c.cluster.APIVips[index].IP) } func (a *ApiVipsWrapper) Verification(index int) *models.VipVerification { return a.c.cluster.APIVips[index].Verification } @@ -174,9 +176,10 @@ type IngressVipsWrapper struct { c *clusterPreprocessContext } -func (i *IngressVipsWrapper) Name() string { return "Ingress" } -func (i *IngressVipsWrapper) Len() int { return len(i.c.cluster.IngressVips) } -func (i *IngressVipsWrapper) IP(index int) string { return string(i.c.cluster.IngressVips[index].IP) } +func (i *IngressVipsWrapper) Name() string { return "Ingress" } +func (i *IngressVipsWrapper) Type() network.VipType { return network.VipTypeIngress } +func (i *IngressVipsWrapper) Len() int { return 
len(i.c.cluster.IngressVips) } +func (i *IngressVipsWrapper) IP(index int) string { return string(i.c.cluster.IngressVips[index].IP) } func (i *IngressVipsWrapper) Verification(index int) *models.VipVerification { return i.c.cluster.IngressVips[index].Verification } @@ -238,9 +241,14 @@ func (v *clusterValidator) areVipsValid(c *clusterPreprocessContext, vipsWrapper failed := false for i := 0; i != vipsWrapper.Len(); i++ { - verification, err := network.VerifyVip(c.cluster.Hosts, network.GetMachineCidrById(c.cluster, i), vipsWrapper.IP(i), name, + verification, err := network.VerifyVip(c.cluster.Hosts, c.cluster.MachineNetworks, vipsWrapper.IP(i), vipsWrapper.Type(), vipsWrapper.Verification(i), v.log) - failed = failed || verification != models.VipVerificationSucceeded + if verification == models.VipVerificationSucceeded { + verification, err = network.ValidateVipInHostNetworks(c.cluster.Hosts, c.cluster.MachineNetworks, vipsWrapper.IP(i), vipsWrapper.Type(), v.log) + failed = failed || verification != models.VipVerificationSucceeded + } else { + failed = true + } if err != nil { multiErr = multierror.Append(multiErr, err) } @@ -412,27 +420,6 @@ func (v *clusterValidator) isDNSDomainDefined(c *clusterPreprocessContext) (Vali return ValidationFailure, "The base domain is undefined and must be provided." } -func checkCidrsOverlapping(cluster *common.Cluster) error { - //Currently, the networks arrays can hold up to 2 subnets, one for each family - //in the same order. If machine networks is defined we assume it follows the - //same conversion - var machineNetworkCidr, clusterNetworkCidr, serviceNetworkCidr string - for index := range cluster.ClusterNetworks { - if index < len(cluster.MachineNetworks) { - machineNetworkCidr = string(cluster.MachineNetworks[index].Cidr) - } - clusterNetworkCidr = string(cluster.ClusterNetworks[index].Cidr) - serviceNetworkCidr = string(cluster.ServiceNetworks[index].Cidr) - - if err := network.VerifyClusterCIDRsNotOverlap(machineNetworkCidr, - clusterNetworkCidr, serviceNetworkCidr, - network.IsMachineNetworkRequired(cluster)); err != nil { - return err - } - } - return nil -} - func (v *clusterValidator) noCidrsOverlapping(c *clusterPreprocessContext) (ValidationStatus, string) { clusterCidrDefined, _ := v.isClusterCidrDefined(c) serviceCidrDefined, _ := v.isServiceCidrDefined(c) @@ -444,13 +431,8 @@ func (v *clusterValidator) noCidrsOverlapping(c *clusterPreprocessContext) (Vali } return ValidationPending, "At least one of the CIDRs (Machine Network, Cluster Network, Service Network) is undefined." } - if len(c.cluster.ClusterNetworks) != len(c.cluster.ServiceNetworks) { - // TODO MGMT-7587: Support any number of subnets - // Assumes that the number of cluster networks equal to the number of service networks - return ValidationError, "A mismatch between the number of Cluster and Service networks" - } - if err := checkCidrsOverlapping(c.cluster); err != nil { - return ValidationFailure, fmt.Sprintf("CIDRS Overlapping: %s.", err.Error()) + if err := network.VerifyNoNetworkCidrOverlaps(c.cluster.ClusterNetworks, c.cluster.MachineNetworks, c.cluster.ServiceNetworks); err != nil { + return ValidationFailure, err.Error() } return ValidationSuccess, "No CIDRS are overlapping." 
} diff --git a/internal/cluster/validator_test.go b/internal/cluster/validator_test.go index 3ed5c0ee50a..ef7ac08dc51 100644 --- a/internal/cluster/validator_test.go +++ b/internal/cluster/validator_test.go @@ -365,7 +365,7 @@ var _ = Describe("areVipsValid", func() { status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1.2.3.[56]> are not verified yet", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1.2.3.[56]> is not verified yet", strings.ToLower(lcontext.name)))) }) It("ipv4 vips verified", func() { preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -379,7 +379,7 @@ var _ = Describe("areVipsValid", func() { status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1001:db8::6[45]> are not verified yet", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1001:db8::6[45]> is not verified yet", strings.ToLower(lcontext.name)))) }) It("ipv4 vips verified", func() { preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -393,7 +393,7 @@ var _ = Describe("areVipsValid", func() { status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1001:db8::6[45]> are not verified yet", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1001:db8::6[45]> is not verified yet", strings.ToLower(lcontext.name)))) }) It("all successful", func() { preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -422,7 +422,7 @@ var _ = Describe("areVipsValid", func() { preprocessContext.cluster.IngressVips[0].Verification = common.VipVerificationPtr(models.VipVerificationFailed) status, message := lcontext.function(preprocessContext) Expect(status).Should(Equal(ValidationFailure)) - Expect(message).Should(MatchRegexp(fmt.Sprintf("%s vips <1.2.3.[56]> is already in use in cidr 1.2.3.0/24", strings.ToLower(lcontext.name)))) + Expect(message).Should(MatchRegexp(fmt.Sprintf("%s-vip <1.2.3.[56]> is already in use in cidr 1.2.3.0/24", strings.ToLower(lcontext.name)))) }) }) } @@ -515,7 +515,7 @@ var _ = Describe("Validator tests", func() { }} status, message := validator.areApiVipsValid(preprocessContext) Expect(status).To(Equal(ValidationFailure)) - Expect(message).To(Equal("api vips <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) + Expect(message).To(Equal("api-vip <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) // Now try with an API VIP that is not a broadcast address, this should pass validation. preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ @@ -542,7 +542,7 @@ var _ = Describe("Validator tests", func() { }} status, message := validator.areIngressVipsValid(preprocessContext) Expect(status).To(Equal(ValidationFailure)) - Expect(message).To(Equal("ingress vips <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) + Expect(message).To(Equal("ingress-vip <1.2.3.255> is the broadcast address of machine-network-cidr <1.2.3.0/24>")) // Now try with an Ingress VIP that is not a broadcast address, this should pass validation. 
preprocessContext.cluster = &common.Cluster{Cluster: models.Cluster{ diff --git a/internal/host/transition_test.go b/internal/host/transition_test.go index aff1920404f..aac5ae1d391 100644 --- a/internal/host/transition_test.go +++ b/internal/host/transition_test.go @@ -76,7 +76,7 @@ func createValidatorCfg() *hardware.ValidatorCfg { EnableUpgradeAgent: true, }, VersionedRequirements: hardware.VersionedRequirementsDecoder{ - "default": { + "default": models.VersionedHostRequirements{ Version: "default", MasterRequirements: &defaultMasterRequirements, WorkerRequirements: &defaultWorkerRequirements, @@ -3204,7 +3204,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleWorker, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Host couldn't synchronize with any NTP server", "Failed to fetch container images needed for installation from image. This may be due to a network hiccup. Retry to install again. If this problem persists, "+ "check your network settings to make sure you’re not blocked.")), @@ -3218,7 +3218,7 @@ var _ = Describe("Refresh Host", func() { HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role worker"}, HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role worker"}, IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname worker-1 is unique in cluster"}, - BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use"}, IsPlatformNetworkSettingsValid: {status: ValidationSuccess, messagePattern: "Platform RHEL is allowed"}, CompatibleWithClusterPlatform: {status: ValidationSuccess, messagePattern: "Host is compatible with cluster platform baremetal"}, IsNTPSynced: {status: ValidationFailure, messagePattern: "Host couldn't synchronize with any NTP server"}, @@ -3239,7 +3239,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. 
Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Require at least 4 CPU cores for master role, found only 2", "Require at least 16.00 GiB RAM for role master, found only 8.00 GiB", "Host couldn't synchronize with any NTP server", @@ -3255,7 +3255,7 @@ var _ = Describe("Refresh Host", func() { HasCPUCoresForRole: {status: ValidationFailure, messagePattern: "Require at least 4 CPU cores for master role, found only 2"}, HasMemoryForRole: {status: ValidationFailure, messagePattern: "Require at least 16.00 GiB RAM for role master, found only 8.00 GiB"}, IsHostnameUnique: {status: ValidationSuccess, messagePattern: "Hostname worker-1 is unique in cluster"}, - BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use"}, IsPlatformNetworkSettingsValid: {status: ValidationSuccess, messagePattern: "Platform RHEL is allowed"}, CompatibleWithClusterPlatform: {status: ValidationSuccess, messagePattern: "Host is compatible with cluster platform baremetal"}, IsNTPSynced: {status: ValidationFailure, messagePattern: "Host couldn't synchronize with any NTP server"}, @@ -3369,7 +3369,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Host couldn't synchronize with any NTP server", "Failed to fetch container images needed for installation from image. This may be due to a network hiccup. Retry to install again. If this problem persists, "+ "check your network settings to make sure you’re not blocked.")), @@ -3402,7 +3402,7 @@ var _ = Describe("Refresh Host", func() { imageStatuses: map[string]*models.ContainerImageAvailability{common.TestDefaultConfig.ImageName: common.TestImageStatusesFailure}, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks", + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use", "Host couldn't synchronize with any NTP server", "Failed to fetch container images needed for installation from image. This may be due to a network hiccup. Retry to install again. If this problem persists, "+ "check your network settings to make sure you’re not blocked.")), @@ -4234,7 +4234,7 @@ var _ = Describe("Refresh Host", func() { ntpSources: defaultNTPSources, role: models.HostRoleMaster, statusInfoChecker: makeValueChecker(formatStatusInfoFailedValidation(statusInfoNotReadyForInstall, - "Host does not belong to machine network CIDRs. 
Verify that the host belongs to every CIDR listed under machine networks")), + "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use")), validationsChecker: makeJsonChecker(map[validationID]validationCheckResult{ IsConnected: {status: ValidationSuccess, messagePattern: "Host is connected"}, HasInventory: {status: ValidationSuccess, messagePattern: "Valid inventory exists for the host"}, @@ -4245,7 +4245,7 @@ var _ = Describe("Refresh Host", func() { HasCPUCoresForRole: {status: ValidationSuccess, messagePattern: "Sufficient CPU cores for role master"}, HasMemoryForRole: {status: ValidationSuccess, messagePattern: "Sufficient RAM for role master"}, IsHostnameUnique: {status: ValidationSuccess, messagePattern: " is unique in cluster"}, - BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"}, + BelongsToMachineCidr: {status: ValidationFailure, messagePattern: "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use"}, IsHostnameValid: {status: ValidationSuccess, messagePattern: "Hostname .* is allowed"}, BelongsToMajorityGroup: {status: ValidationSuccess, messagePattern: "Host has connectivity to the majority of hosts in the cluster"}, IsNTPSynced: {status: ValidationSuccess, messagePattern: "Host NTP is synced"}, diff --git a/internal/host/validator.go b/internal/host/validator.go index f1ccb798943..65a6ab6c3b3 100644 --- a/internal/host/validator.go +++ b/internal/host/validator.go @@ -605,8 +605,8 @@ func (v *validator) belongsToMachineCidr(c *validationContext) (ValidationStatus if c.inventory == nil || !network.IsMachineCidrAvailable(c.cluster) { return ValidationPending, "Missing inventory or machine network CIDR" } - if !network.IsHostInPrimaryMachineNetCidr(v.log, c.cluster, c.host) { - return ValidationFailure, "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks" + if !network.IsHostInMachineNetCidrs(v.log, c.cluster, c.host) { + return ValidationFailure, "Host does not belong to machine network CIDRs. Verify that the host belongs to a listed machine network CIDR for each IP stack in use" } return ValidationSuccess, "Host belongs to all machine network CIDRs" } @@ -675,8 +675,9 @@ func (v *validator) belongsToL2MajorityGroup(c *validationContext, majorityGroup return ValidationFailure } - // TODO(mko) This rule should be revised as soon as OCP supports multiple machineNetwork - // entries using the same IP stack. + // TODO(mko) This rule must be revised to support multiple machineNetwork + // entries using the same IP stack on clusters without + // user-managed networking. 
(OCPBUGS-30730) areNetworksEqual := func(ipnet1, ipnet2 *net.IPNet) bool { return ipnet1.IP.Equal(ipnet2.IP) && bytes.Equal(ipnet1.Mask, ipnet2.Mask) } @@ -1874,7 +1875,7 @@ func (v *validator) noIscsiNicBelongsToMachineCidr(c *validationContext) (Valida return ValidationError, "Cannot find network interface associated to iSCSI host IP address" } - found := network.IsInterfaceInPrimaryMachineNetCidr(v.log, c.cluster, nic) + found := network.IsInterfaceInMachineNetCidr(v.log, c.cluster, nic) if found { return ValidationFailure, "Network interface connected to iSCSI disk cannot belong to machine network CIDRs" } diff --git a/internal/network/cidr_validations.go b/internal/network/cidr_validations.go index 3618023ccb4..bd49f3b537f 100644 --- a/internal/network/cidr_validations.go +++ b/internal/network/cidr_validations.go @@ -1,10 +1,11 @@ package network import ( + "errors" + "fmt" "net" "github.com/openshift/assisted-service/models" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -21,7 +22,7 @@ const MinSNOMachineMaskDelta = 1 func parseCIDR(cidr string) (ip net.IP, ipnet *net.IPNet, err error) { ip, ipnet, err = net.ParseCIDR(cidr) if err != nil { - err = errors.Wrapf(err, "Failed to parse CIDR '%s'", cidr) + err = fmt.Errorf("Failed to parse CIDR '%s': %w", cidr, err) } return } @@ -39,18 +40,18 @@ func NetworksOverlap(aCidrStr, bCidrStr string) (bool, error) { return acidr.Contains(bcidr.IP) || bcidr.Contains(acidr.IP), nil } -func VerifyNetworksNotOverlap(aCidrStr, bCidrStr string) error { +func VerifyNetworksNotOverlap(aCidrStr, bCidrStr models.Subnet) error { if aCidrStr == "" || bCidrStr == "" { return nil } - overlap, err := NetworksOverlap(aCidrStr, bCidrStr) + overlap, err := NetworksOverlap(string(aCidrStr), string(bCidrStr)) if err != nil { return err } if overlap { - return errors.Errorf("CIDRS %s and %s overlap", aCidrStr, bCidrStr) + return fmt.Errorf("CIDRS %s and %s overlap", aCidrStr, bCidrStr) } return nil } @@ -63,13 +64,13 @@ func verifySubnetCIDR(cidrStr string, minSubnetMaskSize int) error { ones, bits := cidr.Mask.Size() // We would like to allow enough addresses. 
Therefore, ones must be not greater than (bits-minSubnetMaskSize) if ones < 1 || ones > bits-minSubnetMaskSize { - return errors.Errorf("Address mask size must be between 1 to %d and must include at least %d addresses", bits-minSubnetMaskSize, 1< bits-MinMaskDelta { - return errors.Errorf("Host prefix, now %d, must be less than or equal to %d to allow at least 128 addresses", hostNetworkPrefix, bits-MinMaskDelta) + return fmt.Errorf("Host prefix, now %d, must be less than or equal to %d to allow at least 128 addresses", hostNetworkPrefix, bits-MinMaskDelta) } var requestedNumHosts int if numberOfHosts == 1 { @@ -119,33 +120,59 @@ func VerifyClusterCidrSize(hostNetworkPrefix int, clusterNetworkCIDR string, num // 63 to avoid overflow possibleNumHosts := uint64(1) << min(63, max(hostNetworkPrefix-clusterNetworkPrefix, 0)) if uint64(requestedNumHosts) > possibleNumHosts { - return errors.Errorf("Cluster network CIDR prefix %d does not contain enough addresses for %d hosts each one with %d prefix (%d addresses)", + return fmt.Errorf("Cluster network CIDR prefix %d does not contain enough addresses for %d hosts each one with %d prefix (%d addresses)", clusterNetworkPrefix, numberOfHosts, hostNetworkPrefix, uint64(1)< cannot be set if Machine Network CIDR is empty", vipName, vip) } - if !ipInCidr(vip, machineNetworkCidr) { - return models.VipVerificationFailed, errors.Errorf("%s <%s> does not belong to machine-network-cidr <%s>", vipName, vip, machineNetworkCidr) + if machineNetworkCidr == "" { + return models.VipVerificationFailed, errors.Errorf("%s <%s> does not belong to any Machine Network", vipName, vip) } if ipIsBroadcast(vip, machineNetworkCidr) { return models.VipVerificationFailed, errors.Errorf("%s <%s> is the broadcast address of machine-network-cidr <%s>", vipName, vip, machineNetworkCidr) @@ -139,11 +162,45 @@ func VerifyVip(hosts []*models.Host, machineNetworkCidr string, vip string, vipN msg = fmt.Sprintf("%s. The machine network range is too small for the cluster. 
Please redefine the network.", msg) } case models.VipVerificationUnverified: - msg = fmt.Sprintf("%s <%s> are not verified yet.", vipName, vip) + msg = fmt.Sprintf("%s <%s> is not verified yet.", vipName, vip) } return ret, errors.New(msg) } +func ValidateVipInHostNetworks(hosts []*models.Host, machineNetworks []*models.MachineNetwork, vip string, vipName VipType, log logrus.FieldLogger) (models.VipVerification, error) { + _, machineNetworkCidr := findMachineNetworkForVip(vip, machineNetworks) + _, machineIpnet, err := net.ParseCIDR(machineNetworkCidr) + if err != nil { + log.WithError(err).Errorf("can't parse machine cidr %s", machineNetworkCidr) + return models.VipVerificationFailed, fmt.Errorf("can't parse machine cidr %s", machineNetworkCidr) + } + for _, h := range hosts { + switch common.GetEffectiveRole(h) { + case models.HostRoleWorker: + if vipName != VipTypeIngress { + continue + } + case models.HostRoleMaster, models.HostRoleBootstrap: + // When there are fewer than 2 workers, control plane nodes are + // schedulable and Ingress will run on the control plane nodes + if vipName != VipTypeAPI && len(hosts) > 4 { + continue + } + default: + continue + } + if h.Inventory == "" { + continue + } + if !belongsToNetwork(log, h, machineIpnet) { + return models.VipVerificationFailed, fmt.Errorf( + "%s host %s not in the Machine Network containing the %s <%s>", + h.Role, h.ID, vipName, machineNetworkCidr) + } + } + return models.VipVerificationSucceeded, nil +} + func ValidateNoVIPAddressesDuplicates(apiVips []*models.APIVip, ingressVips []*models.IngressVip) error { var ( err error @@ -194,11 +251,11 @@ func ValidateNoVIPAddressesDuplicates(apiVips []*models.APIVip, ingressVips []*m // This function is called from places which assume it is OK for a VIP to be unverified. // The assumption is that VIPs are eventually verified by cluster validation // (i.e api-vips-valid, ingress-vips-valid) -func VerifyVips(hosts []*models.Host, machineNetworkCidr string, apiVip string, ingressVip string, log logrus.FieldLogger) error { - verification, err := VerifyVip(hosts, machineNetworkCidr, apiVip, "api-vip", nil, log) +func VerifyVips(hosts []*models.Host, machineNetworks []*models.MachineNetwork, apiVip string, ingressVip string, log logrus.FieldLogger) error { + verification, err := VerifyVip(hosts, machineNetworks, apiVip, VipTypeAPI, nil, log) // Error is ignored if the verification didn't fail if verification != models.VipVerificationFailed { - verification, err = VerifyVip(hosts, machineNetworkCidr, ingressVip, "ingress-vip", nil, log) + verification, err = VerifyVip(hosts, machineNetworks, ingressVip, VipTypeIngress, nil, log) } if verification != models.VipVerificationFailed { return ValidateNoVIPAddressesDuplicates([]*models.APIVip{{IP: models.IP(apiVip)}}, []*models.IngressVip{{IP: models.IP(ingressVip)}}) @@ -298,23 +355,6 @@ func belongsToNetwork(log logrus.FieldLogger, h *models.Host, machineIpnet *net. 
return false } -func GetPrimaryMachineCIDRHosts(log logrus.FieldLogger, cluster *common.Cluster) ([]*models.Host, error) { - if !IsMachineCidrAvailable(cluster) { - return nil, errors.New("Machine network CIDR was not set in cluster") - } - _, machineIpnet, err := net.ParseCIDR(GetMachineCidrById(cluster, 0)) - if err != nil { - return nil, err - } - ret := make([]*models.Host, 0) - for _, h := range cluster.Hosts { - if belongsToNetwork(log, h, machineIpnet) { - ret = append(ret, h) - } - } - return ret, nil -} - // GetPrimaryMachineCidrForUserManagedNetwork used to get the primary machine cidr in case of none platform and sno func GetPrimaryMachineCidrForUserManagedNetwork(cluster *common.Cluster, log logrus.FieldLogger) string { if IsMachineCidrAvailable(cluster) { @@ -340,25 +380,9 @@ func GetPrimaryMachineCidrForUserManagedNetwork(cluster *common.Cluster, log log return "" } -func GetSecondaryMachineCidr(cluster *common.Cluster) (string, error) { - var secondaryMachineCidr string - - if IsMachineCidrAvailable(cluster) { - secondaryMachineCidr = GetMachineCidrById(cluster, 1) - } - if secondaryMachineCidr != "" { - return secondaryMachineCidr, nil - } - return "", errors.Errorf("unable to get secondary machine network for cluster %s", cluster.ID.String()) -} - -// GetMachineNetworksFromBoostrapHost used to collect machine networks from the cluster's bootstrap host. +// GetMachineNetworksFromBootstrapHost used to collect machine networks from the cluster's bootstrap host. // The function will get at most one IPv4 and one IPv6 network. -func GetMachineNetworksFromBoostrapHost(cluster *common.Cluster, log logrus.FieldLogger) []*models.MachineNetwork { - if IsMachineCidrAvailable(cluster) { - return cluster.MachineNetworks - } - +func GetMachineNetworksFromBootstrapHost(cluster *common.Cluster, log logrus.FieldLogger) []*models.MachineNetwork { bootstrap := common.GetBootstrapHost(cluster) if bootstrap == nil { log.Warnf("No bootstrap found in cluster %s", cluster.ID) @@ -518,62 +542,56 @@ func GetDefaultRouteNetworkByFamily(h *models.Host, networks map[AddressFamily][ } // Parse the Machine Network CIDRs into IPNet -func parseMachineNetworks(machineNetworks []*models.MachineNetwork) ([]*net.IPNet, error) { - var parsedCidr []*net.IPNet +func parseMachineNetworks(machineNetworks []*models.MachineNetwork) ([]*net.IPNet, []*net.IPNet, error) { + var parsedV4Cidr, parsedV6Cidr []*net.IPNet for _, machineNet := range machineNetworks { _, machineIpnet, err := net.ParseCIDR(string(machineNet.Cidr)) if err != nil { - return nil, err + return nil, nil, err + } + if IsIPV4CIDR(string(machineNet.Cidr)) { + parsedV4Cidr = append(parsedV4Cidr, machineIpnet) + } else { + parsedV6Cidr = append(parsedV6Cidr, machineIpnet) } - parsedCidr = append(parsedCidr, machineIpnet) } - return parsedCidr, nil + return parsedV4Cidr, parsedV6Cidr, nil } -// Check if a host belongs to all the networks specified as Machine Networks. -func IsHostInPrimaryMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, host *models.Host) bool { - // TODO(mko) This rule should be revised as soon as OCP supports multiple machineNetwork - // entries using the same IP stack. - return forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { - // initialize agg to true the first time this function is called - if index == 0 { - agg = true - } - - return agg && belongsToNetwork(log, host, machineIpnet) +// Check if a host belongs to one of the Machine Networks of each family. 
+func IsHostInMachineNetCidrs(log logrus.FieldLogger, cluster *common.Cluster, host *models.Host) bool { + inV4, inV6 := forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { + return agg || belongsToNetwork(log, host, machineIpnet) }) + return inV4 && inV6 } // Check if an interface is part of one of the networks specified as Machine Networks -func IsInterfaceInPrimaryMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, nic *models.Interface) bool { - return forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { - // initialize agg to false the first time this function is called - if index == 0 { - agg = false - } - +func IsInterfaceInMachineNetCidr(log logrus.FieldLogger, cluster *common.Cluster, nic *models.Interface) bool { + inV4, inV6 := forEachMachineNetwork(log, cluster, func(agg bool, machineIpnet *net.IPNet, index int) bool { isIPv4 := IsIPV4CIDR(machineIpnet.String()) found, _ := findMatchingIP(machineIpnet, nic, isIPv4) return agg || found }) + return inV4 || inV6 } type machineNetworkCheckFunc func(bool, *net.IPNet, int) bool // Function calling a given check fonction on each of the networks specified in cluster's Machine Networks. -func forEachMachineNetwork(log logrus.FieldLogger, cluster *common.Cluster, checkFunc machineNetworkCheckFunc) bool { +func forEachMachineNetwork(log logrus.FieldLogger, cluster *common.Cluster, checkFunc machineNetworkCheckFunc) (bool, bool) { if !IsMachineCidrAvailable(cluster) { - return false + return false, false } - machineNetwork, err := parseMachineNetworks(cluster.MachineNetworks) + machineNetworksV4, machineNetworksV6, err := parseMachineNetworks(cluster.MachineNetworks) if err != nil { log.WithError(err).Warn("Failed to parse machine networks") - return false + return false, false } - return lo.Reduce(machineNetwork, checkFunc, false) + return lo.Reduce(machineNetworksV4, checkFunc, false), lo.Reduce(machineNetworksV6, checkFunc, false) } type IPSet map[strfmt.IPv4]struct{} diff --git a/internal/network/machine_network_cidr_test.go b/internal/network/machine_network_cidr_test.go index e5c12d55a27..4946e470b22 100644 --- a/internal/network/machine_network_cidr_test.go +++ b/internal/network/machine_network_cidr_test.go @@ -103,40 +103,202 @@ var _ = Describe("inventory", func() { Expect(cidr).To(Equal("")) }) }) - Context("GetMachineCIDRHosts", func() { - It("No Machine CIDR", func() { - cluster := createCluster("1.2.5.6", "", - createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")), - createInventory(createInterface("127.0.0.1/17"))) - _, err := GetPrimaryMachineCIDRHosts(logrus.New(), cluster) + Context("ValidateVipInHostNetworks", func() { + var ( + log logrus.FieldLogger + primaryMachineCidr = "1.2.4.0/23" + secondaryMachineCidr = "1.2.8.0/23" + machineNetworks = []*models.MachineNetwork{ + {Cidr: models.Subnet(primaryMachineCidr)}, + {Cidr: models.Subnet(secondaryMachineCidr)}, + } + ) + + createRoleHost := func(role models.HostRole, cidr string) *models.Host { + return &models.Host{ + Role: role, + Inventory: createInventory(createInterface(cidr)), + } + } + + BeforeEach(func() { + log = logrus.New() + }) + It("succeeds when all hosts are in a single machine network", func() { + cluster := createCluster("1.2.5.6", primaryMachineCidr) + cluster.Hosts = []*models.Host{ + createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"), + createRoleHost(models.HostRoleMaster, "1.2.4.102/23"), + 
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+		})
+		It("detects when VIPs are in the wrong machine network", func() {
+			cluster := createCluster("1.2.9.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.9.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
 			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
 		})
-		It("No matching Machine CIDR", func() {
-			cluster := createCluster("1.2.5.6", "1.1.0.0/16",
-				createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")),
-				createInventory(createInterface("127.0.0.1/17")))
-			hosts, err := GetPrimaryMachineCIDRHosts(logrus.New(), cluster)
-			Expect(err).To(Not(HaveOccurred()))
-			Expect(hosts).To(BeEmpty())
+		It("succeeds on a compact cluster with a single machine network", func() {
+			cluster := createCluster("1.2.5.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
 		})
-		It("Some matched", func() {
-			cluster := createCluster("1.2.5.6", "1.2.4.0/23",
-				createInventory(createInterface("3.3.3.3/16"), createInterface("8.8.8.8/8", "1.2.5.7/23")),
-				createInventory(createInterface("127.0.0.1/17")),
-				createInventory(createInterface("1.2.4.79/23")))
-			hosts, err := GetPrimaryMachineCIDRHosts(logrus.New(), cluster)
-			Expect(err).To(Not(HaveOccurred()))
-			Expect(hosts).To(Equal([]*models.Host{
-				cluster.Hosts[0],
-				cluster.Hosts[2],
-			}))
+		It("succeeds when VIPs are on different machine networks", func() {
+			cluster := createCluster("1.2.5.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.8.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.8.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.9.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+		})
+		It("detects when VIPs are on wrong machine networks", func() {
+			cluster := createCluster("1.2.9.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.8.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.8.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
+		})
+		It("detects when Ingress VIP not available to a compact cluster", func() {
+			cluster := createCluster("1.2.5.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.9.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
+		})
+		It("detects when API VIP not available to bootstrap master", func() {
+			cluster := createCluster("1.2.5.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.8.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+		})
+		It("detects when API VIP not available to control plane", func() {
+			cluster := createCluster("1.2.5.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.8.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+		})
+		It("detects when Ingress VIP not available to worker", func() {
+			cluster := createCluster("1.2.5.6", primaryMachineCidr)
+			cluster.Hosts = []*models.Host{
+				createRoleHost(models.HostRoleBootstrap, "1.2.4.101/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.102/23"),
+				createRoleHost(models.HostRoleMaster, "1.2.4.103/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.4.111/23"),
+				createRoleHost(models.HostRoleWorker, "1.2.8.112/23"),
+			}
+			cluster.IngressVips = []*models.IngressVip{{IP: models.IP("1.2.5.7")}}
+
+			result, err := ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), VipTypeAPI, log)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationSucceeded))
+
+			result, err = ValidateVipInHostNetworks(cluster.Hosts, machineNetworks, GetIngressVipById(cluster, 0), VipTypeIngress, log)
+			Expect(err).To(HaveOccurred())
+			Expect(result).To(Equal(models.VipVerificationFailed))
 		})
 	})
 
 	Context("VerifyVips", func() {
 		var (
 			log                logrus.FieldLogger
 			primaryMachineCidr = "1.2.4.0/23"
+			machineNetworks    = []*models.MachineNetwork{
+				{Cidr: models.Subnet(primaryMachineCidr)},
+			}
 		)
 
 		BeforeEach(func() {
@@ -151,7 +313,7 @@ var _ = Describe("inventory", func() {
 				},
 			}
 			cluster.IngressVips = []*models.IngressVip{{IP: models.IP(GetApiVipById(cluster, 0))}}
-			err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
+			err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
 			Expect(err).To(HaveOccurred())
 		})
 		It("Different vips", func() {
@@ -163,7 +325,7 @@ var _ = Describe("inventory", func() {
 					FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\"]}]",
 				},
 			}
-			err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
+			err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
 			Expect(err).ToNot(HaveOccurred())
 		})
 		It("Not free", func() {
@@ -175,7 +337,7 @@ var _ = Describe("inventory", func() {
 					FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.9\"]}]",
 				},
 			}
-			err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
+			err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
 			Expect(err).To(HaveOccurred())
 		})
 		It("Empty", func() {
@@ -187,7 +349,7 @@ var _ = Describe("inventory", func() {
 					FreeAddresses: "",
 				},
 			}
-			err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
+			err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
 			Expect(err).ToNot(HaveOccurred())
 		})
 		It("Free", func() {
@@ -199,7 +361,7 @@ var _ = Describe("inventory", func() {
 					FreeAddresses: "[{\"network\":\"1.2.4.0/23\",\"free_addresses\":[\"1.2.5.6\",\"1.2.5.8\",\"1.2.5.9\"]}]",
 				},
 			}
-			err := VerifyVips(cluster.Hosts, primaryMachineCidr, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
+			err := VerifyVips(cluster.Hosts, machineNetworks, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
 			Expect(err).ToNot(HaveOccurred())
 		})
 		It("machine cidr is too small", func() {
@@ -214,7 +376,7 @@ var _ = Describe("inventory", func() {
 			}
 			cluster.Hosts = []*models.Host{h, h, h, h, h}
 			cluster.APIVips = []*models.APIVip{{IP: "1.2.5.2"}}
-			err := VerifyVips(cluster.Hosts, "1.2.5.0/29", GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
+			err := VerifyVips(cluster.Hosts, []*models.MachineNetwork{{Cidr: "1.2.5.0/29"}}, GetApiVipById(cluster, 0), GetIngressVipById(cluster, 0), log)
 			Expect(err).To(HaveOccurred())
 			Expect(err.Error()).To(ContainSubstring("The machine network range is too small for the cluster"))
 		})
@@ -335,7 +497,7 @@ var _ = Describe("inventory", func() {
 
 	})
 
-	Context("IsHostInPrimaryMachineNetCidr", func() {
+	Context("IsHostInMachineNetCidrs", func() {
 
 		var log logrus.FieldLogger
 
@@ -344,22 +506,23 @@ var _ = Describe("inventory", func() {
 		})
 
 		DescribeTable(
-			"IsHostInPrimaryMachineNetCidr",
+			"IsHostInMachineNetCidrs",
 			func(nics []*models.Interface, machineNetworks []*models.MachineNetwork, expectedResult bool) {
 				cluster := createCluster("", "", createInventory(nics...))
 				cluster.MachineNetworks = machineNetworks
-				res := IsHostInPrimaryMachineNetCidr(log, cluster, cluster.Hosts[0])
+				res := IsHostInMachineNetCidrs(log, cluster, cluster.Hosts[0])
 				Expect(res).To(Equal(expectedResult))
 			},
 			Entry("MachineNetworks is empty", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{}, false),
 			Entry("MachineNetworks is malformed", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{{Cidr: "a.b.c.d"}}, false),
 			Entry("Interfaces is empty", []*models.Interface{}, []*models.MachineNetwork{{Cidr: "a.b.c.d"}}, false),
 			Entry("Interface IP is malformed", []*models.Interface{createInterface("a.b.c.d/24")}, []*models.MachineNetwork{{Cidr: "1.2.3.4/24"}}, false),
-			Entry("Host belongs to all machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24"), addIPv6Addresses(createInterface(), "2001:db8::1/48")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::/48"}}, true),
-			Entry("Host doesn't belong to all machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::a1/120"}}, false),
+			Entry("Host belongs to dual machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24"), addIPv6Addresses(createInterface(), "2001:db8::1/48")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::/48"}}, true),
+			Entry("Host doesn't belong to dual machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::a1/120"}}, false),
+			Entry("Host belongs to one of dual machine network CIDRs", []*models.Interface{createInterface("1.2.3.4/24"), addIPv6Addresses(createInterface(), "2001:db8::1/48")}, []*models.MachineNetwork{{Cidr: "1.2.3.0/24"}, {Cidr: "2001:db8::/48"}, {Cidr: "1.2.4.0/24"}, {Cidr: "2001:db81::/48"}}, true),
 		)
 	})
 
-	Context("IsInterfaceInPrimaryMachineNetCidr", func() {
+	Context("IsInterfaceInMachineNetCidr", func() {
 
 		var log logrus.FieldLogger
 
@@ -368,11 +531,11 @@ var _ = Describe("inventory", func() {
 		})
 
 		DescribeTable(
-			"IsInterfaceInPrimaryMachineNetCidr",
+			"IsInterfaceInMachineNetCidr",
 			func(nic *models.Interface, machineNetworks []*models.MachineNetwork, expectedResult bool) {
 				cluster := createCluster("", "", createInventory(nic))
 				cluster.MachineNetworks = machineNetworks
-				res := IsInterfaceInPrimaryMachineNetCidr(log, cluster, nic)
+				res := IsInterfaceInMachineNetCidr(log, cluster, nic)
 				Expect(res).To(Equal(expectedResult))
 			},
 			Entry("MachineNetworks is empty", createInterface("1.2.3.4/24"), []*models.MachineNetwork{}, false),
diff --git a/internal/provider/baremetal/installConfig.go b/internal/provider/baremetal/installConfig.go
index b60b5e2a404..241d1fc1d5c 100644
--- a/internal/provider/baremetal/installConfig.go
+++ b/internal/provider/baremetal/installConfig.go
@@ -53,22 +53,24 @@ func (p *baremetalProvider) AddPlatformToInstallConfig(
 			return err
 		}
 
+	InterfaceSearch:
 		for _, iface := range inventory.Interfaces {
 			// We are looking for the NIC that matches the first Machine Network configured
 			// for the cluster. This is to ensure that BootMACAddress belongs to the NIC that
 			// is really used and not to any fake interface even if this interface has IPs.
 			ipAddressses := append(iface.IPV4Addresses, iface.IPV6Addresses...)
 			for _, ip := range ipAddressses {
-				overlap, _ := network.NetworksOverlap(ip, string(cluster.MachineNetworks[0].Cidr))
-				if overlap {
-					hosts[yamlHostIdx].BootMACAddress = iface.MacAddress
-					break
+				for _, machineNetwork := range cluster.MachineNetworks {
+					overlap, _ := network.NetworksOverlap(ip, string(machineNetwork.Cidr))
+					if overlap {
+						hosts[yamlHostIdx].BootMACAddress = iface.MacAddress
+						break InterfaceSearch
+					}
 				}
 			}
 		}
 		if hosts[yamlHostIdx].BootMACAddress == "" {
-			err = errors.Errorf("Failed to find a network interface matching machine network (%s) for host %s",
-				cluster.MachineNetworks[0].Cidr,
+			err = errors.Errorf("Failed to find a network interface matching machine networks for host %s",
 				hostutil.GetHostnameForMsg(host),
 			)
 			p.Log.Error(err)
diff --git a/internal/provider/base.go b/internal/provider/base.go
index 557959d61fc..5a1be4ea7bd 100644
--- a/internal/provider/base.go
+++ b/internal/provider/base.go
@@ -1,7 +1,6 @@
 package provider
 
 import (
-	"github.com/go-openapi/swag"
 	"github.com/openshift/assisted-service/internal/common"
 	"github.com/openshift/assisted-service/internal/host/hostutil"
 	"github.com/openshift/assisted-service/internal/installcfg"
@@ -42,12 +41,14 @@ type Provider interface {
 	PostCreateManifestsHook(cluster *common.Cluster, envVars *[]string, workDir string) error
 }
 
-func GetMachineNetworkForUserManagedNetworking(log logrus.FieldLogger, cluster *common.Cluster) []installcfg.MachineNetwork {
+func GetMachineNetworksForUserManagedNetworking(log logrus.FieldLogger, cluster *common.Cluster) []installcfg.MachineNetwork {
 	bootstrapCidr := network.GetPrimaryMachineCidrForUserManagedNetwork(cluster, log)
 	if bootstrapCidr != "" {
 		log.Infof("Selected bootstrap machine network CIDR %s for cluster %s", bootstrapCidr, cluster.ID.String())
+		if !network.IsMachineCidrAvailable(cluster) {
+			cluster.MachineNetworks = network.GetMachineNetworksFromBootstrapHost(cluster, log)
+		}
 		var machineNetwork []installcfg.MachineNetwork
-		cluster.MachineNetworks = network.GetMachineNetworksFromBoostrapHost(cluster, log)
 		for _, net := range cluster.MachineNetworks {
 			machineNetwork = append(machineNetwork, installcfg.MachineNetwork{Cidr: string(net.Cidr)})
 		}
@@ -86,10 +87,7 @@ func replaceMachineNetworkIfNeeded(log logrus.FieldLogger, cluster *common.Clust
 }
 
 func ConfigureUserManagedNetworkingInInstallConfig(log logrus.FieldLogger, cluster *common.Cluster, cfg *installcfg.InstallerConfigBaremetal) {
-	cfg.Networking.MachineNetwork = GetMachineNetworkForUserManagedNetworking(log, cluster)
-	if cluster.NetworkType != nil {
-		cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType)
-	}
+	cfg.Networking.MachineNetwork = GetMachineNetworksForUserManagedNetworking(log, cluster)
 
 	if common.IsSingleNodeCluster(cluster) {
diff --git a/internal/provider/nutanix/installConfig.go b/internal/provider/nutanix/installConfig.go
index e5ac215693a..45a9c68f0ad 100644
--- a/internal/provider/nutanix/installConfig.go
+++ b/internal/provider/nutanix/installConfig.go
@@ -60,7 +60,7 @@ func (p nutanixProvider) AddPlatformToInstallConfig(
 			nPlatform.DeprecatedIngressVIP = network.GetIngressVipById(cluster, 0)
 		}
 	} else {
-		cfg.Networking.MachineNetwork = provider.GetMachineNetworkForUserManagedNetworking(p.Log, cluster)
+		cfg.Networking.MachineNetwork = provider.GetMachineNetworksForUserManagedNetworking(p.Log, cluster)
 		if cluster.NetworkType != nil {
 			cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType)
 		}
diff --git a/internal/provider/vsphere/installConfig.go b/internal/provider/vsphere/installConfig.go
index f4a92737dd7..830eb0e91fe 100644
--- a/internal/provider/vsphere/installConfig.go
+++ b/internal/provider/vsphere/installConfig.go
@@ -75,7 +75,7 @@ func (p vsphereProvider) AddPlatformToInstallConfig(
 			vsPlatform.DeprecatedIngressVIP = network.GetIngressVipById(cluster, 0)
 		}
 	} else {
-		cfg.Networking.MachineNetwork = provider.GetMachineNetworkForUserManagedNetworking(p.Log, cluster)
+		cfg.Networking.MachineNetwork = provider.GetMachineNetworksForUserManagedNetworking(p.Log, cluster)
 		if cluster.NetworkType != nil {
 			cfg.Networking.NetworkType = swag.StringValue(cluster.NetworkType)
 		}
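
Note on the network-helper refactor above: parseMachineNetworks now buckets the Machine Network CIDRs into IPv4 and IPv6 lists, forEachMachineNetwork reduces each bucket separately, and IsHostInMachineNetCidrs requires membership in some network of each configured family. The following stand-alone Go sketch (standard library only; names are illustrative, not the service's models) shows the same per-family pattern under those assumptions:

// Sketch of the per-address-family membership check: split CIDRs by family,
// then require the host to sit in at least one network of every family present.
package main

import (
	"fmt"
	"net"
)

// splitByFamily parses CIDRs and buckets them into IPv4 and IPv6 lists.
func splitByFamily(cidrs []string) ([]*net.IPNet, []*net.IPNet, error) {
	var v4, v6 []*net.IPNet
	for _, c := range cidrs {
		_, ipnet, err := net.ParseCIDR(c)
		if err != nil {
			return nil, nil, err
		}
		if ipnet.IP.To4() != nil {
			v4 = append(v4, ipnet)
		} else {
			v6 = append(v6, ipnet)
		}
	}
	return v4, v6, nil
}

// anyContains reports whether ip falls inside at least one of the networks.
func anyContains(nets []*net.IPNet, ip net.IP) bool {
	for _, n := range nets {
		if n.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	v4, v6, err := splitByFamily([]string{"1.2.4.0/23", "2001:db8::/48"})
	if err != nil {
		panic(err)
	}
	hostV4 := net.ParseIP("1.2.4.101")
	hostV6 := net.ParseIP("2001:db8::1")
	// "in machine networks" here means: in some network of each configured family.
	fmt.Println(anyContains(v4, hostV4) && anyContains(v6, hostV6)) // true
}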
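The labeled break introduced in the baremetal install-config change stops the BootMACAddress search as soon as one interface address overlaps any machine network. A minimal, self-contained sketch of that control flow (simplified stand-in types and data, not the provider code):

// Sketch of a labeled-break search over interfaces, their IPs, and machine networks.
package main

import (
	"fmt"
	"net"
)

type nic struct {
	mac string
	ips []string // CIDR-style addresses, e.g. "1.2.4.101/23"
}

func bootMAC(nics []nic, machineNetworks []string) string {
	mac := ""
Search:
	for _, iface := range nics {
		for _, ip := range iface.ips {
			addr, _, err := net.ParseCIDR(ip)
			if err != nil {
				continue
			}
			for _, cidr := range machineNetworks {
				_, ipnet, err := net.ParseCIDR(cidr)
				if err != nil {
					continue
				}
				if ipnet.Contains(addr) {
					mac = iface.mac
					break Search // exit all three loops at once
				}
			}
		}
	}
	return mac
}

func main() {
	nics := []nic{
		{mac: "aa:bb:cc:dd:ee:01", ips: []string{"10.0.0.5/24"}},
		{mac: "aa:bb:cc:dd:ee:02", ips: []string{"1.2.4.101/23"}},
	}
	fmt.Println(bootMAC(nics, []string{"1.2.4.0/23", "1.2.8.0/23"})) // aa:bb:cc:dd:ee:02
}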
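The provider/base.go change also alters when the bootstrap-host networks are used: the cluster's machine networks are only derived from the bootstrap host if none are already set, instead of always being overwritten. A tiny illustrative sketch of that fallback order (hypothetical names, not the service's API):

// Sketch: prefer already-configured machine networks, fall back to detected ones.
package main

import "fmt"

type machineNetwork struct{ cidr string }

func machineNetworksForUMN(configured, fromBootstrap []machineNetwork) []machineNetwork {
	if len(configured) > 0 {
		// Machine networks were already set on the cluster; keep them.
		return configured
	}
	// Otherwise fall back to the networks detected on the bootstrap host.
	return fromBootstrap
}

func main() {
	configured := []machineNetwork{{cidr: "1.2.4.0/23"}}
	detected := []machineNetwork{{cidr: "10.0.0.0/24"}}
	fmt.Println(machineNetworksForUMN(configured, detected)) // [{1.2.4.0/23}]
	fmt.Println(machineNetworksForUMN(nil, detected))        // [{10.0.0.0/24}]
}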