From 1714e7cc491b50c585e59861350c911484aba2ae Mon Sep 17 00:00:00 2001 From: Mark Old Date: Mon, 13 Jan 2025 15:05:42 -0800 Subject: [PATCH] HIVE-2391: vSphere zonal support --- apis/go.mod | 6 +- apis/go.sum | 5 +- apis/hive/v1/clusterdeprovision_types.go | 3 + apis/hive/v1/vsphere/machinepools.go | 34 +- apis/hive/v1/vsphere/platform.go | 97 +- apis/hive/v1/vsphere/zz_generated.deepcopy.go | 36 +- apis/hive/v1/zz_generated.deepcopy.go | 9 +- .../github.com/openshift/installer/LICENSE | 202 ++ .../github.com/openshift/installer/NOTICE | 5 + .../installer/pkg/types/vsphere/OWNERS | 7 + .../installer/pkg/types/vsphere/doc.go | 7 + .../pkg/types/vsphere/machinepool.go | 122 ++ .../installer/pkg/types/vsphere/metadata.go | 25 + .../installer/pkg/types/vsphere/platform.go | 353 ++++ .../types/vsphere/zz_generated.deepcopy.go | 283 +++ apis/vendor/modules.txt | 6 +- .../hive.openshift.io_clusterdeployments.yaml | 545 +++++- ...hive.openshift.io_clusterdeprovisions.yaml | 10 +- .../crds/hive.openshift.io_clusterpools.yaml | 545 +++++- .../crds/hive.openshift.io_machinepools.yaml | 140 +- contrib/pkg/createcluster/create.go | 93 +- contrib/pkg/deprovision/vsphere.go | 14 +- docs/hiveutil.md | 25 +- go.mod | 4 + go.sum | 6 +- hack/app-sre/saas-template.yaml | 1672 ++++++++++++++++- hack/e2e-common.sh | 2 +- pkg/clusterresource/builder_test.go | 42 +- pkg/clusterresource/vsphere.go | 62 +- .../clusterdeployment_controller.go | 17 +- .../installconfigvalidation.go | 12 +- .../installconfigvalidation_test.go | 10 +- .../clusterpool/clusterpool_controller.go | 21 +- pkg/controller/machinepool/vsphereactuator.go | 129 +- .../machinepool/vsphereactuator_test.go | 63 +- pkg/controller/utils/credentials.go | 16 +- pkg/install/generate.go | 10 +- pkg/installmanager/installmanager.go | 13 +- ...terdeployment_validating_admission_hook.go | 29 +- ...ployment_validating_admission_hook_test.go | 27 +- .../clusterpool_validating_admission_hook.go | 16 +- ...hinepool_validating_admission_hook_test.go | 7 +- .../bombsimon/logrusr/v4/.gitignore | 81 + .../github.com/bombsimon/logrusr/v4/LICENSE | 21 + .../github.com/bombsimon/logrusr/v4/README.md | 35 + .../bombsimon/logrusr/v4/logrusr.go | 249 +++ .../apis/hive/v1/clusterdeprovision_types.go | 3 + .../hive/apis/hive/v1/vsphere/machinepools.go | 34 +- .../hive/apis/hive/v1/vsphere/platform.go | 97 +- .../hive/v1/vsphere/zz_generated.deepcopy.go | 36 +- .../apis/hive/v1/zz_generated.deepcopy.go | 9 +- .../openshift/installer/pkg/ipnet/ipnet.go | 19 + .../openshift/installer/pkg/types/aws/doc.go | 1 + .../pkg/types/aws/zz_generated.deepcopy.go | 220 +++ .../installer/pkg/types/azure/doc.go | 1 + .../pkg/types/azure/zz_generated.deepcopy.go | 352 ++++ .../installer/pkg/types/baremetal/doc.go | 1 + .../installer/pkg/types/baremetal/platform.go | 3 - .../types/baremetal/zz_generated.deepcopy.go | 163 ++ .../openshift/installer/pkg/types/doc.go | 1 + .../installer/pkg/types/external/doc.go | 3 +- .../types/external/zz_generated.deepcopy.go | 22 + .../openshift/installer/pkg/types/gcp/doc.go | 1 + .../pkg/types/gcp/zz_generated.deepcopy.go | 266 +++ .../installer/pkg/types/ibmcloud/doc.go | 1 + .../types/ibmcloud/zz_generated.deepcopy.go | 246 +++ .../installer/pkg/types/installconfig.go | 1 + .../openshift/installer/pkg/types/none/doc.go | 1 + .../pkg/types/none/zz_generated.deepcopy.go | 22 + .../installer/pkg/types/nutanix/doc.go | 1 + .../installer/pkg/types/nutanix/platform.go | 2 - .../types/nutanix/zz_generated.deepcopy.go | 299 +++ 
.../installer/pkg/types/openstack/doc.go | 1 + .../installer/pkg/types/openstack/platform.go | 2 - .../types/openstack/zz_generated.deepcopy.go | 219 +++ .../installer/pkg/types/ovirt/doc.go | 1 + .../installer/pkg/types/ovirt/platform.go | 2 - .../pkg/types/ovirt/zz_generated.deepcopy.go | 156 ++ .../installer/pkg/types/powervs/doc.go | 1 + .../types/powervs/zz_generated.deepcopy.go | 133 ++ .../installer/pkg/types/vsphere/doc.go | 1 + .../installer/pkg/types/vsphere/platform.go | 2 - .../types/vsphere/zz_generated.deepcopy.go | 283 +++ .../pkg/types/zz_generated.deepcopy.go | 673 +++++++ vendor/modules.txt | 6 +- 85 files changed, 7966 insertions(+), 435 deletions(-) create mode 100644 apis/vendor/github.com/openshift/installer/LICENSE create mode 100644 apis/vendor/github.com/openshift/installer/NOTICE create mode 100644 apis/vendor/github.com/openshift/installer/pkg/types/vsphere/OWNERS create mode 100644 apis/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go create mode 100644 apis/vendor/github.com/openshift/installer/pkg/types/vsphere/machinepool.go create mode 100644 apis/vendor/github.com/openshift/installer/pkg/types/vsphere/metadata.go create mode 100644 apis/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go create mode 100644 apis/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go create mode 100644 vendor/github.com/bombsimon/logrusr/v4/.gitignore create mode 100644 vendor/github.com/bombsimon/logrusr/v4/LICENSE create mode 100644 vendor/github.com/bombsimon/logrusr/v4/README.md create mode 100644 vendor/github.com/bombsimon/logrusr/v4/logrusr.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/aws/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/azure/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/baremetal/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/external/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/gcp/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/ibmcloud/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/none/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/nutanix/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/openstack/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/ovirt/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/powervs/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/installer/pkg/types/zz_generated.deepcopy.go diff --git a/apis/go.mod b/apis/go.mod index fc203fa0220..8def7a4957e 100644 --- a/apis/go.mod +++ b/apis/go.mod @@ -6,6 +6,7 @@ toolchain go1.24.6 require ( github.com/openshift/api v0.0.0-20250313134101-8a7efbfb5316 + github.com/openshift/installer v1.4.19-ec5 k8s.io/api v0.33.3 k8s.io/apimachinery v0.33.3 sigs.k8s.io/yaml v1.4.0 // indirect @@ -13,13 +14,12 @@ require ( require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.2 github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text 
v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.43.0 // indirect golang.org/x/text v0.29.0 // indirect @@ -33,3 +33,5 @@ require ( // CVE-2025-22872: Some transitive deps are still using older versions. Safe to remove once go.sum shows only 0.38.0 or higher. replace golang.org/x/net => golang.org/x/net v0.38.0 + +replace github.com/openshift/installer => github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca diff --git a/apis/go.sum b/apis/go.sum index d3c561b66b7..d892b1c21c7 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -3,6 +3,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca h1:HNAloxQEJCg0ssd3xBbdslsQR7F9bIUvC2GPdZbnfnI= +github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca/go.mod h1:O+iZiJnRcMHIr8tzLvl+I8JCL7CbDYr4DcLOzj1+AXw= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -29,8 +31,9 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/openshift/api v0.0.0-20250313134101-8a7efbfb5316 h1:iJ1OkAUvFbQPB6qWRDxrH1jj8iA9GA/Jx2vYz7o+i1E= github.com/openshift/api v0.0.0-20250313134101-8a7efbfb5316/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= diff --git a/apis/hive/v1/clusterdeprovision_types.go b/apis/hive/v1/clusterdeprovision_types.go index a4a6bf79d7f..414a1a66535 100644 --- a/apis/hive/v1/clusterdeprovision_types.go +++ b/apis/hive/v1/clusterdeprovision_types.go @@ -129,7 +129,10 @@ type VSphereClusterDeprovision struct { // necessary for communicating with the VCenter. CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"` // VCenter is the vSphere vCenter hostname. + // Deprecated: use VCenters instead VCenter string `json:"vCenter"` + // VCenters are potentially multiple vCenter hostnames. Prefer this field over VCenter. 
+ VCenters []string `json:"vCenters"` } // IBMClusterDeprovision contains IBM Cloud specific configuration for a ClusterDeprovision diff --git a/apis/hive/v1/vsphere/machinepools.go b/apis/hive/v1/vsphere/machinepools.go index e08c4fab713..37e27be7cf2 100644 --- a/apis/hive/v1/vsphere/machinepools.go +++ b/apis/hive/v1/vsphere/machinepools.go @@ -1,33 +1,27 @@ package vsphere +import "github.com/openshift/installer/pkg/types/vsphere" + // MachinePool stores the configuration for a machine pool installed // on vSphere. type MachinePool struct { + vsphere.MachinePool `json:",inline"` + // ResourcePool is the name of the resource pool that will be used for virtual machines. // If it is not present, a default value will be used. + // Deprecated: use Topology instead // +optional - ResourcePool string `json:"resourcePool,omitempty"` - - // NumCPUs is the total number of virtual processor cores to assign a vm. - NumCPUs int32 `json:"cpus"` - - // NumCoresPerSocket is the number of cores per socket in a vm. The number - // of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. - NumCoresPerSocket int32 `json:"coresPerSocket"` - - // Memory is the size of a VM's memory in MB. - MemoryMiB int64 `json:"memoryMB"` - - // OSDisk defines the storage for instance. - OSDisk `json:"osDisk"` + DeprecatedResourcePool string `json:"resourcePool,omitempty"` // TagIDs is a list of up to 10 tags to add to the VMs that this machine set provisions in vSphere. + // Deprecated: use Topology instead // +kubebuilder:validation:MaxItems:=10 - TagIDs []string `json:"tagIDs,omitempty"` -} + DeprecatedTagIDs []string `json:"tagIDs,omitempty"` -// OSDisk defines the disk for a virtual machine. -type OSDisk struct { - // DiskSizeGB defines the size of disk in GB. - DiskSizeGB int32 `json:"diskSizeGB"` + // Topology is the vSphere topology that will be used for virtual machines. + // If it is not present, a default value will be used. + // +optional + Topology *vsphere.Topology `json:"topology,omitempty"` } + +type OSDisk = vsphere.OSDisk diff --git a/apis/hive/v1/vsphere/platform.go b/apis/hive/v1/vsphere/platform.go index 62f1f84372d..21c05b5760a 100644 --- a/apis/hive/v1/vsphere/platform.go +++ b/apis/hive/v1/vsphere/platform.go @@ -1,13 +1,24 @@ package vsphere import ( + "fmt" + "strings" + + "github.com/go-logr/logr" + "github.com/openshift/installer/pkg/types/vsphere" corev1 "k8s.io/api/core/v1" ) // Platform stores any global configuration used for vSphere platforms. type Platform struct { + // Infrastructure is the desired state of the vSphere infrastructure provider. + Infrastructure *vsphere.Platform `json:"infrastructure,omitempty"` + // VCenter is the domain name or IP address of the vCenter. - VCenter string `json:"vCenter"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedVCenter string `json:"vCenter,omitempty"` // CredentialsSecretRef refers to a secret that contains the vSphere account access // credentials: GOVC_USERNAME, GOVC_PASSWORD fields. @@ -18,18 +29,92 @@ type Platform struct { CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"` // Datacenter is the name of the datacenter to use in the vCenter. 
- Datacenter string `json:"datacenter"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedDatacenter string `json:"datacenter,omitempty"` // DefaultDatastore is the default datastore to use for provisioning volumes. - DefaultDatastore string `json:"defaultDatastore"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedDefaultDatastore string `json:"defaultDatastore,omitempty"` // Folder is the name of the folder that will be used and/or created for // virtual machines. - Folder string `json:"folder,omitempty"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedFolder string `json:"folder,omitempty"` // Cluster is the name of the cluster virtual machines will be cloned into. - Cluster string `json:"cluster,omitempty"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedCluster string `json:"cluster,omitempty"` // Network specifies the name of the network to be used by the cluster. - Network string `json:"network,omitempty"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedNetwork string `json:"network,omitempty"` +} + +func (p *Platform) ConvertDeprecatedFields(logger logr.Logger) { + if p.Infrastructure != nil { + return + } + + p.Infrastructure = &vsphere.Platform{ + VCenters: []vsphere.VCenter{ + { + Server: p.DeprecatedVCenter, + Port: 443, + Datacenters: []string{p.DeprecatedDatacenter}, + }, + }, + FailureDomains: []vsphere.FailureDomain{ + { + // names from https://github.com/openshift/installer/blob/f7731922a0f17a8339a3e837f72898ac77643611/pkg/types/vsphere/conversion/installconfig.go#L58-L61 + Name: "generated-failure-domain", + Region: "generated-region", + Zone: "generated-zone", + Server: p.DeprecatedVCenter, + Topology: vsphere.Topology{ + Datacenter: p.DeprecatedDatacenter, + Datastore: setDatastorePath(p.DeprecatedDefaultDatastore, p.DeprecatedDatacenter, logger), + Folder: setFolderPath(p.DeprecatedFolder, p.DeprecatedDatacenter, logger), + ComputeCluster: setComputeClusterPath(p.DeprecatedCluster, p.DeprecatedDatacenter, logger), + Networks: []string{p.DeprecatedNetwork}, + }, + }, + }, + } + +} + +// Copied (and slightly modified) from https://github.com/openshift/installer/blob/f7731922a0f17a8339a3e837f72898ac77643611/pkg/types/vsphere/conversion/installconfig.go#L75-L97 + +func setComputeClusterPath(cluster, datacenter string, logger logr.Logger) string { + if cluster != "" && !strings.HasPrefix(cluster, "/") { + logger.V(1).Info(fmt.Sprintf("computeCluster as a non-path is now depreciated please use the form: /%s/host/%s", datacenter, cluster)) + return fmt.Sprintf("/%s/host/%s", datacenter, cluster) + } + return cluster +} + +func setDatastorePath(datastore, datacenter string, logger logr.Logger) string { + if datastore != "" && !strings.HasPrefix(datastore, "/") { + logger.V(1).Info(fmt.Sprintf("datastore as a non-path is now depreciated please use the form: /%s/datastore/%s", datacenter, datastore)) + return fmt.Sprintf("/%s/datastore/%s", datacenter, datastore) + } + return datastore +} + +func setFolderPath(folder, datacenter string, logger logr.Logger) string { + if folder != "" && !strings.HasPrefix(folder, "/") { + 
logger.V(1).Info(fmt.Sprintf("folder as a non-path is now depreciated please use the form: /%s/vm/%s", datacenter, folder)) + return fmt.Sprintf("/%s/vm/%s", datacenter, folder) + } + return folder } diff --git a/apis/hive/v1/vsphere/zz_generated.deepcopy.go b/apis/hive/v1/vsphere/zz_generated.deepcopy.go index 6108b1415a7..62008c9aeb0 100644 --- a/apis/hive/v1/vsphere/zz_generated.deepcopy.go +++ b/apis/hive/v1/vsphere/zz_generated.deepcopy.go @@ -5,15 +5,24 @@ package vsphere +import ( + typesvsphere "github.com/openshift/installer/pkg/types/vsphere" +) + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachinePool) DeepCopyInto(out *MachinePool) { *out = *in - out.OSDisk = in.OSDisk - if in.TagIDs != nil { - in, out := &in.TagIDs, &out.TagIDs + in.MachinePool.DeepCopyInto(&out.MachinePool) + if in.DeprecatedTagIDs != nil { + in, out := &in.DeprecatedTagIDs, &out.DeprecatedTagIDs *out = make([]string, len(*in)) copy(*out, *in) } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(typesvsphere.Topology) + (*in).DeepCopyInto(*out) + } return } @@ -27,25 +36,14 @@ func (in *MachinePool) DeepCopy() *MachinePool { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSDisk) DeepCopyInto(out *OSDisk) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. -func (in *OSDisk) DeepCopy() *OSDisk { - if in == nil { - return nil - } - out := new(OSDisk) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Platform) DeepCopyInto(out *Platform) { *out = *in + if in.Infrastructure != nil { + in, out := &in.Infrastructure, &out.Infrastructure + *out = new(typesvsphere.Platform) + (*in).DeepCopyInto(*out) + } out.CredentialsSecretRef = in.CredentialsSecretRef out.CertificatesSecretRef = in.CertificatesSecretRef return diff --git a/apis/hive/v1/zz_generated.deepcopy.go b/apis/hive/v1/zz_generated.deepcopy.go index b20056e20c1..fdf0bd54b44 100644 --- a/apis/hive/v1/zz_generated.deepcopy.go +++ b/apis/hive/v1/zz_generated.deepcopy.go @@ -1070,7 +1070,7 @@ func (in *ClusterDeprovisionPlatform) DeepCopyInto(out *ClusterDeprovisionPlatfo if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(VSphereClusterDeprovision) - **out = **in + (*in).DeepCopyInto(*out) } if in.IBMCloud != nil { in, out := &in.IBMCloud, &out.IBMCloud @@ -3419,7 +3419,7 @@ func (in *Platform) DeepCopyInto(out *Platform) { if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(vsphere.Platform) - **out = **in + (*in).DeepCopyInto(*out) } if in.AgentBareMetal != nil { in, out := &in.AgentBareMetal, &out.AgentBareMetal @@ -4181,6 +4181,11 @@ func (in *VSphereClusterDeprovision) DeepCopyInto(out *VSphereClusterDeprovision *out = *in out.CredentialsSecretRef = in.CredentialsSecretRef out.CertificatesSecretRef = in.CertificatesSecretRef + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/apis/vendor/github.com/openshift/installer/LICENSE b/apis/vendor/github.com/openshift/installer/LICENSE new file mode 100644 index 00000000000..01e141147fc --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/apis/vendor/github.com/openshift/installer/NOTICE b/apis/vendor/github.com/openshift/installer/NOTICE new file mode 100644 index 00000000000..571695f5f26 --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2017 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/OWNERS b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/OWNERS new file mode 100644 index 00000000000..d1c39b1b668 --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file just uses aliases defined in OWNERS_ALIASES. 
+ +approvers: + - vsphere-approvers +reviewers: + - vsphere-reviewers diff --git a/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go new file mode 100644 index 00000000000..c168fcaa0c0 --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go @@ -0,0 +1,7 @@ +// Package vsphere contains vSphere-specific structures for installer +// configuration and management. +// +k8s:deepcopy-gen=package +package vsphere + +// Name is name for the vsphere platform. +const Name string = "vsphere" diff --git a/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/machinepool.go b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/machinepool.go new file mode 100644 index 00000000000..1a0fdb06e32 --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/machinepool.go @@ -0,0 +1,122 @@ +package vsphere + +// MachinePool stores the configuration for a machine pool installed +// on vSphere. +type MachinePool struct { + // NumCPUs is the total number of virtual processor cores to assign a vm. + // + // +optional + NumCPUs int32 `json:"cpus"` + + // NumCoresPerSocket is the number of cores per socket in a vm. The number + // of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. + // + // +optional + NumCoresPerSocket int32 `json:"coresPerSocket"` + + // Memory is the size of a VM's memory in MB. + // + // +optional + MemoryMiB int64 `json:"memoryMB"` + + // OSDisk defines the storage for instance. + // + // +optional + OSDisk `json:"osDisk"` + + // DataDisks are additional disks to add to the VM that are not part of the VM's OVA template. + // + // +optional + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MaxItems=29 + DataDisks []DataDisk `json:"dataDisks"` + + // Zones defines available zones + // Zones is available in TechPreview. + // + // +omitempty + Zones []string `json:"zones,omitempty"` +} + +// OSDisk defines the disk for a virtual machine. +type OSDisk struct { + // DiskSizeGB defines the size of disk in GB. + // + // +optional + DiskSizeGB int32 `json:"diskSizeGB"` +} + +// DataDisk defines a data disk to add to the VM that is not part of the VM OVA template. +type DataDisk struct { + // name is used to identify the disk definition. name is required needs to be unique so that it can be used to + // clearly identify purpose of the disk. + // +kubebuilder:example=images_1 + // +kubebuilder:validation:MaxLength=80 + // +kubebuilder:validation:Pattern="^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$" + // +required + Name string `json:"name"` + // sizeGiB is the size of the disk in GiB. + // The maximum supported size is 16384 GiB. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=16384 + // +required + SizeGiB int32 `json:"sizeGiB"` + // provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. + // Allowed values are "Thin", "Thick", "EagerlyZeroed", and omitted. + // When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. + // When set to Thick, the full disk size will be allocated when disk is created. + // When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. Such disks may take longer time during creation compared to other disk formats. 
+ // When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere. + // +optional + ProvisioningMode ProvisioningMode `json:"provisioningMode,omitempty"` +} + +// ProvisioningMode represents the various provisioning types available to a VMs disk. +// +kubebuilder:validation:Enum=Thin;Thick;EagerlyZeroed +type ProvisioningMode string + +const ( + // ProvisioningModeThin creates the disk using thin provisioning. This means a sparse (allocate on demand) + // format with additional space optimizations. + ProvisioningModeThin ProvisioningMode = "Thin" + + // ProvisioningModeThick creates the disk with all space allocated. + ProvisioningModeThick ProvisioningMode = "Thick" + + // ProvisioningModeEagerlyZeroed creates the disk using eager zero provisioning. An eager zeroed thick disk + // has all space allocated and wiped clean of any previous contents on the physical media at + // creation time. Such disks may take longer time during creation compared to other disk formats. + ProvisioningModeEagerlyZeroed ProvisioningMode = "EagerlyZeroed" +) + +// Set sets the values from `required` to `p`. +func (p *MachinePool) Set(required *MachinePool) { + if required == nil || p == nil { + return + } + + if required.NumCPUs != 0 { + p.NumCPUs = required.NumCPUs + } + + if required.NumCoresPerSocket != 0 { + p.NumCoresPerSocket = required.NumCoresPerSocket + } + + if required.MemoryMiB != 0 { + p.MemoryMiB = required.MemoryMiB + } + + if required.OSDisk.DiskSizeGB != 0 { + p.OSDisk.DiskSizeGB = required.OSDisk.DiskSizeGB + } + + if len(required.Zones) > 0 { + p.Zones = required.Zones + } + + if len(required.DataDisks) > 0 { + p.DataDisks = required.DataDisks + } +} diff --git a/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/metadata.go b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/metadata.go new file mode 100644 index 00000000000..703bc641a2b --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/metadata.go @@ -0,0 +1,25 @@ +package vsphere + +// Metadata contains vSphere metadata (e.g. for uninstalling the cluster). +type Metadata struct { + // VCenter is the domain name or IP address of the vCenter. + VCenter string `json:"vCenter,omitempty"` + // Username is the name of the user to use to connect to the vCenter. + Username string `json:"username,omitempty"` + // Password is the password for the user to use to connect to the vCenter. + Password string `json:"password,omitempty"` + // TerraformPlatform is the type... + TerraformPlatform string `json:"terraform_platform"` + // VCenters collection of vcenters when multi vcenter support is enabled + VCenters []VCenters +} + +// VCenters contains information on individual vcenter. +type VCenters struct { + // VCenter is the domain name or IP address of the vCenter. + VCenter string `json:"vCenter"` + // Username is the name of the user to use to connect to the vCenter. + Username string `json:"username"` + // Password is the password for the user to use to connect to the vCenter. 
+ Password string `json:"password"` +} diff --git a/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go new file mode 100644 index 00000000000..f8bed1eaf04 --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go @@ -0,0 +1,353 @@ +package vsphere + +import ( + configv1 "github.com/openshift/api/config/v1" +) + +// DiskType is a disk provisioning type for vsphere. +// +kubebuilder:validation:Enum="";thin;thick;eagerZeroedThick +type DiskType string + +// FailureDomainType is the string representation name of the failure domain type. +// There are three defined failure domains types currently, Datacenter, ComputeCluster and HostGroup. +// Each represents a vCenter object type within a vSphere environment. +// +kubebuilder:validation:Enum=HostGroup;Datacenter;ComputeCluster +type FailureDomainType string + +const ( + // DiskTypeThin uses Thin disk provisioning type for vsphere in the cluster. + DiskTypeThin DiskType = "thin" + + // DiskTypeThick uses Thick disk provisioning type for vsphere in the cluster. + DiskTypeThick DiskType = "thick" + + // DiskTypeEagerZeroedThick uses EagerZeroedThick disk provisioning type for vsphere in the cluster. + DiskTypeEagerZeroedThick DiskType = "eagerZeroedThick" + + // TagCategoryRegion the tag category associated with regions. + TagCategoryRegion = "openshift-region" + + // TagCategoryZone the tag category associated with zones. + TagCategoryZone = "openshift-zone" +) + +const ( + // ControlPlaneRole represents control-plane nodes. + ControlPlaneRole = "control-plane" + // ComputeRole represents worker nodes. + ComputeRole = "compute" + // BootstrapRole represents bootstrap nodes. + BootstrapRole = "bootstrap" +) + +const ( + // HostGroupFailureDomain is a failure domain for a vCenter vm-host group. + HostGroupFailureDomain FailureDomainType = "HostGroup" + // ComputeClusterFailureDomain is a failure domain for a vCenter compute cluster. + ComputeClusterFailureDomain FailureDomainType = "ComputeCluster" + // DatacenterFailureDomain is a failure domain for a vCenter datacenter. + DatacenterFailureDomain FailureDomainType = "Datacenter" +) + +// Platform stores any global configuration used for vsphere platforms. +type Platform struct { + // VCenter is the domain name or IP address of the vCenter. + // Deprecated: Use VCenters.Server + DeprecatedVCenter string `json:"vCenter,omitempty"` + // Username is the name of the user to use to connect to the vCenter. + // Deprecated: Use VCenters.Username + DeprecatedUsername string `json:"username,omitempty"` + // Password is the password for the user to use to connect to the vCenter. + // Deprecated: Use VCenters.Password + DeprecatedPassword string `json:"password,omitempty"` + // Datacenter is the name of the datacenter to use in the vCenter. + // Deprecated: Use FailureDomains.Topology.Datacenter + DeprecatedDatacenter string `json:"datacenter,omitempty"` + // DefaultDatastore is the default datastore to use for provisioning volumes. + // Deprecated: Use FailureDomains.Topology.Datastore + DeprecatedDefaultDatastore string `json:"defaultDatastore,omitempty"` + // Folder is the absolute path of the folder that will be used and/or created for + // virtual machines. The absolute path is of the form //vm//. 
+ // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` + // +optional + // Deprecated: Use FailureDomains.Topology.Folder + DeprecatedFolder string `json:"folder,omitempty"` + // Cluster is the name of the cluster virtual machines will be cloned into. + // Deprecated: Use FailureDomains.Topology.Cluster + DeprecatedCluster string `json:"cluster,omitempty"` + // ResourcePool is the absolute path of the resource pool where virtual machines will be + // created. The absolute path is of the form //host//Resources/. + // Deprecated: Use FailureDomains.Topology.ResourcePool + DeprecatedResourcePool string `json:"resourcePool,omitempty"` + // ClusterOSImage overrides the url provided in rhcos.json to download the RHCOS OVA + ClusterOSImage string `json:"clusterOSImage,omitempty"` + + // DeprecatedAPIVIP is the virtual IP address for the api endpoint + // Deprecated: Use APIVIPs + // + // +kubebuilder:validation:format=ip + // +optional + DeprecatedAPIVIP string `json:"apiVIP,omitempty"` + + // APIVIPs contains the VIP(s) for the api endpoint. In dual stack clusters + // it contains an IPv4 and IPv6 address, otherwise only one VIP + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:Format=ip + // +optional + APIVIPs []string `json:"apiVIPs,omitempty"` + + // DeprecatedIngressVIP is the virtual IP address for ingress + // Deprecated: Use IngressVIPs + // + // +kubebuilder:validation:format=ip + // +optional + DeprecatedIngressVIP string `json:"ingressVIP,omitempty"` + + // IngressVIPs contains the VIP(s) for ingress. In dual stack clusters it + // contains an IPv4 and IPv6 address, otherwise only one VIP + // + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:Format=ip + // +optional + IngressVIPs []string `json:"ingressVIPs,omitempty"` + + // DefaultMachinePlatform is the default configuration used when + // installing on VSphere for machine pools which do not define their own + // platform configuration. + // +optional + DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"` + // Network specifies the name of the network to be used by the cluster. + // Deprecated: Use FailureDomains.Topology.Network + DeprecatedNetwork string `json:"network,omitempty"` + // DiskType is the name of the disk provisioning type, + // valid values are thin, thick, and eagerZeroedThick. When not + // specified, it will be set according to the default storage policy + // of vsphere. + DiskType DiskType `json:"diskType,omitempty"` + // VCenters holds the connection details for services to communicate with vCenter. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:MaxItems=3 + // +kubebuilder:validation:MinItems=1 + VCenters []VCenter `json:"vcenters,omitempty"` + // FailureDomains holds the VSpherePlatformFailureDomainSpec which contains + // the definition of region, zone and the vCenter topology. + // If this is omitted failure domains (regions and zones) will not be used. + // +kubebuilder:validation:Optional + FailureDomains []FailureDomain `json:"failureDomains,omitempty"` + + // nodeNetworking contains the definition of internal and external network constraints for + // assigning the node's networking. + // If this field is omitted, networking defaults to the legacy + // address selection behavior which is to only support a single address and + // return the first one found. 
+ // +optional + NodeNetworking *configv1.VSpherePlatformNodeNetworking `json:"nodeNetworking,omitempty"` + + // LoadBalancer defines how the load balancer used by the cluster is configured. + // LoadBalancer is available in TechPreview. + // +optional + LoadBalancer *configv1.VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` + // Hosts defines network configurations to be applied by the installer. Hosts is available in TechPreview. + Hosts []*Host `json:"hosts,omitempty"` +} + +// FailureDomain holds the region and zone failure domain and +// the vCenter topology of that failure domain. +type FailureDomain struct { + // name defines the name of the FailureDomain + // This name is arbitrary but will be used + // in VSpherePlatformDeploymentZone for association. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Name string `json:"name"` + // region defines a FailureDomainCoordinate which + // includes the name of the vCenter tag, the failure domain type + // and the name of the vCenter tag category. + + // The region is the name of the tag in vCenter that is associated with the + // tag category `openshift-region`. The region name must match the tag name + // and must exist prior to installation. When the regionType is Datacenter + // the tag must be attached to the toplogy.datacenter object in vCenter. + // When the regionType is ComputeCluster the tag must be attached to the topology.computeCluster + // object in vCenter. + // +kubebuilder:validation:Required + Region string `json:"region"` + // The zone is the name of the tag in vCenter that is associated with + // the tag category `openshift-zone`. The zone name must match the tag name + // and must exist prior to installation. When zoneType is HostGroup the + // ESXi hosts defined in the provided in the topology.hostGroup field must be tagged. + // When the zoneType is ComputeCluster the tag must be attached to the topology.computeCluster + // object in vCenter. + // +kubebuilder:validation:Required + Zone string `json:"zone"` + // server is the fully-qualified domain name or the IP address of the vCenter server. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Server string `json:"server"` + // Topology describes a given failure domain using vSphere constructs + // +kubebuilder:validation:Required + Topology Topology `json:"topology"` + + // regionType is the type of failure domain region, the current values are "Datacenter" and "ComputeCluster" + // +kubebuilder:validation:Enum=Datacenter;ComputeCluster + // When regionType is Datacenter the zoneType must be ComputeCluster. + // When regionType is ComputeCluster the zoneType must be HostGroup + // +optional + RegionType FailureDomainType `json:"regionType,omitempty"` + // zoneType is the type of the failure domain zone, the current values are "ComputeCluster" and "HostGroup" + + // When zoneType is ComputeCluster the regionType must be Datacenter + // When zoneType is HostGroup the regionType must be ComputeCluster + // If the zoneType is HostGroup topology.hostGroup must be defined and exist in vCenter + // prior to installation. + // +kubebuilder:validation:Enum=ComputeCluster;HostGroup + // +optional + ZoneType FailureDomainType `json:"zoneType,omitempty"` +} + +// Topology holds the required and optional vCenter objects - datacenter, +// computeCluster, networks, datastore and resourcePool - to provision virtual machines. 
+type Topology struct { + // datacenter is the vCenter datacenter in which virtual machines will be located + // and defined as the failure domain. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=80 + Datacenter string `json:"datacenter"` + // computeCluster as the failure domain + // This is required to be a path + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + ComputeCluster string `json:"computeCluster"` + // networks is the list of networks within this failure domain + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + Networks []string `json:"networks,omitempty"` + // datastore is the name or inventory path of the datastore in which the + // virtual machine is created/located. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + Datastore string `json:"datastore"` + // resourcePool is the absolute path of the resource pool where virtual machines will be + // created. The absolute path is of the form //host//Resources/. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^/.*?/host/.*?/Resources.*` + // +optional + ResourcePool string `json:"resourcePool,omitempty"` + // folder is the inventory path of the folder in which the + // virtual machine is created/located. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` + // +optional + Folder string `json:"folder,omitempty"` + // template is the inventory path of the virtual machine or template + // that will be used for cloning. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` + // +optional + Template string `json:"template,omitempty"` + // tagIDs is an optional set of tags to add to an instance. Specified tagIDs + // must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified. + // +kubebuilder:example=`urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL` + // +optional + TagIDs []string `json:"tagIDs,omitempty"` + + // hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + // hostGroup is limited to 80 characters. + // This field is required when the ZoneType is HostGroup + // +kubebuilder:validation:MaxLength=80 + // +optional + HostGroup string `json:"hostGroup,omitempty"` +} + +// VCenter stores the vCenter connection fields +// https://github.com/kubernetes/cloud-provider-vsphere/blob/master/pkg/common/config/types_yaml.go +type VCenter struct { + // server is the fully-qualified domain name or the IP address of the vCenter server. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=255 + Server string `json:"server"` + // port is the TCP port that will be used to communicate to + // the vCenter endpoint. This is typically unchanged from + // the default of HTTPS TCP/443. 
+ // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=32767 + // +kubebuilder:default=443 + Port int32 `json:"port,omitempty"` + // Username is the username that will be used to connect to vCenter + // +kubebuilder:validation:Required + Username string `json:"user"` + // Password is the password for the user to use to connect to the vCenter. + // +kubebuilder:validation:Required + Password string `json:"password"` + // Datacenter in which VMs are located. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + Datacenters []string `json:"datacenters"` +} + +// Host defines host VMs to generate as part of the installation. +type Host struct { + // FailureDomain refers to the name of a FailureDomain as described in https://github.com/openshift/enhancements/blob/master/enhancements/installer/vsphere-ipi-zonal.md + // +optional + FailureDomain string `json:"failureDomain"` + // NetworkDeviceSpec to be applied to the host + // +kubebuilder:validation:Required + NetworkDevice *NetworkDeviceSpec `json:"networkDevice"` + // Role defines the role of the node + // +kubebuilder:validation:Enum="";bootstrap;control-plane;compute + // +kubebuilder:validation:Required + Role string `json:"role"` +} + +// NetworkDeviceSpec defines network config for static IP assignment. +type NetworkDeviceSpec struct { + // gateway is an IPv4 or IPv6 address which represents the subnet gateway, + // for example, 192.168.1.1. + // +kubebuilder:validation:Format=ipv4 + // +kubebuilder:validation:Format=ipv6 + Gateway string `json:"gateway,omitempty"` + + // ipAddrs is a list of one or more IPv4 and/or IPv6 addresses and CIDR to assign to + // this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are + // intended to allow explicit assignment of a machine's IP address. + // +kubebuilder:validation:Format=ipv4 + // +kubebuilder:validation:Format=ipv6 + // +kubebuilder:example=`192.168.1.100/24` + // +kubebuilder:example=`2001:DB8:0000:0000:244:17FF:FEB6:D37D/64` + // +kubebuilder:validation:Required + IPAddrs []string `json:"ipAddrs"` + + // nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example, + // 8.8.8.8. a nameserver is not provided by a fulfilled IPAddressClaim. If DHCP is not the + // source of IP addresses for this network device, nameservers should include a valid nameserver. + // +kubebuilder:validation:Format=ipv4 + // +kubebuilder:validation:Format=ipv6 + // +kubebuilder:example=`8.8.8.8` + Nameservers []string `json:"nameservers,omitempty"` +} + +// IsControlPlane checks if the current host is a master. +func (h *Host) IsControlPlane() bool { + return h.Role == ControlPlaneRole +} + +// IsCompute checks if the current host is a worker. +func (h *Host) IsCompute() bool { + return h.Role == ComputeRole +} + +// IsBootstrap checks if the current host is a bootstrap. +func (h *Host) IsBootstrap() bool { + return h.Role == BootstrapRole +} diff --git a/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go new file mode 100644 index 00000000000..7f529e47620 --- /dev/null +++ b/apis/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go @@ -0,0 +1,283 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package vsphere + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDisk) DeepCopyInto(out *DataDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDisk. +func (in *DataDisk) DeepCopy() *DataDisk { + if in == nil { + return nil + } + out := new(DataDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailureDomain) DeepCopyInto(out *FailureDomain) { + *out = *in + in.Topology.DeepCopyInto(&out.Topology) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailureDomain. +func (in *FailureDomain) DeepCopy() *FailureDomain { + if in == nil { + return nil + } + out := new(FailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Host) DeepCopyInto(out *Host) { + *out = *in + if in.NetworkDevice != nil { + in, out := &in.NetworkDevice, &out.NetworkDevice + *out = new(NetworkDeviceSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Host. +func (in *Host) DeepCopy() *Host { + if in == nil { + return nil + } + out := new(Host) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + out.OSDisk = in.OSDisk + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDisk, len(*in)) + copy(*out, *in) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]VCenters, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) { + *out = *in + if in.IPAddrs != nil { + in, out := &in.IPAddrs, &out.IPAddrs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec. 
+func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec { + if in == nil { + return nil + } + out := new(NetworkDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.APIVIPs != nil { + in, out := &in.APIVIPs, &out.APIVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressVIPs != nil { + in, out := &in.IngressVIPs, &out.IngressVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]VCenter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeNetworking != nil { + in, out := &in.NodeNetworking, &out.NodeNetworking + *out = new(v1.VSpherePlatformNodeNetworking) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(v1.VSpherePlatformLoadBalancer) + **out = **in + } + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*Host, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Host) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Topology) DeepCopyInto(out *Topology) { + *out = *in + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TagIDs != nil { + in, out := &in.TagIDs, &out.TagIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology. +func (in *Topology) DeepCopy() *Topology { + if in == nil { + return nil + } + out := new(Topology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenter) DeepCopyInto(out *VCenter) { + *out = *in + if in.Datacenters != nil { + in, out := &in.Datacenters, &out.Datacenters + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenter. 
+func (in *VCenter) DeepCopy() *VCenter { + if in == nil { + return nil + } + out := new(VCenter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenters) DeepCopyInto(out *VCenters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenters. +func (in *VCenters) DeepCopy() *VCenters { + if in == nil { + return nil + } + out := new(VCenters) + in.DeepCopyInto(out) + return out +} diff --git a/apis/vendor/modules.txt b/apis/vendor/modules.txt index d276e7eaf61..07c452d5321 100644 --- a/apis/vendor/modules.txt +++ b/apis/vendor/modules.txt @@ -25,8 +25,9 @@ github.com/openshift/api/config/v1 github.com/openshift/api/machine/v1 github.com/openshift/api/machine/v1beta1 github.com/openshift/api/operator/v1 -# github.com/spf13/pflag v1.0.6 -## explicit; go 1.12 +# github.com/openshift/installer v1.4.19-ec5 => github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca +## explicit; go 1.23.2 +github.com/openshift/installer/pkg/types/vsphere # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 @@ -106,3 +107,4 @@ sigs.k8s.io/structured-merge-diff/v4/value ## explicit; go 1.12 sigs.k8s.io/yaml/goyaml.v2 # golang.org/x/net => golang.org/x/net v0.38.0 +# github.com/openshift/installer => github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca diff --git a/config/crds/hive.openshift.io_clusterdeployments.yaml b/config/crds/hive.openshift.io_clusterdeployments.yaml index f0888af1f6b..0f4a8dfc988 100644 --- a/config/crds/hive.openshift.io_clusterdeployments.yaml +++ b/config/crds/hive.openshift.io_clusterdeployments.yaml @@ -1218,7 +1218,10 @@ spec: type: object x-kubernetes-map-type: atomic cluster: - description: Cluster is the name of the cluster virtual machines will be cloned into. + description: |- + Cluster is the name of the cluster virtual machines will be cloned into. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string credentialsSecretRef: description: |- @@ -1237,28 +1240,554 @@ spec: type: object x-kubernetes-map-type: atomic datacenter: - description: Datacenter is the name of the datacenter to use in the vCenter. + description: |- + Datacenter is the name of the datacenter to use in the vCenter. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string defaultDatastore: - description: DefaultDatastore is the default datastore to use for provisioning volumes. + description: |- + DefaultDatastore is the default datastore to use for provisioning volumes. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string folder: description: |- Folder is the name of the folder that will be used and/or created for virtual machines. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string + infrastructure: + description: Infrastructure is the desired state of the vSphere infrastructure provider. + properties: + apiVIP: + description: |- + DeprecatedAPIVIP is the virtual IP address for the api endpoint + Deprecated: Use APIVIPs + type: string + apiVIPs: + description: |- + APIVIPs contains the VIP(s) for the api endpoint. 
In dual stack clusters + it contains an IPv4 and IPv6 address, otherwise only one VIP + format: ip + items: + type: string + maxItems: 2 + type: array + cluster: + description: |- + Cluster is the name of the cluster virtual machines will be cloned into. + Deprecated: Use FailureDomains.Topology.Cluster + type: string + clusterOSImage: + description: ClusterOSImage overrides the url provided in rhcos.json to download the RHCOS OVA + type: string + datacenter: + description: |- + Datacenter is the name of the datacenter to use in the vCenter. + Deprecated: Use FailureDomains.Topology.Datacenter + type: string + defaultDatastore: + description: |- + DefaultDatastore is the default datastore to use for provisioning volumes. + Deprecated: Use FailureDomains.Topology.Datastore + type: string + defaultMachinePlatform: + description: |- + DefaultMachinePlatform is the default configuration used when + installing on VSphere for machine pools which do not define their own + platform configuration. + properties: + coresPerSocket: + description: |- + NumCoresPerSocket is the number of cores per socket in a vm. The number + of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. + format: int32 + type: integer + cpus: + description: NumCPUs is the total number of virtual processor cores to assign a vm. + format: int32 + type: integer + dataDisks: + description: DataDisks are additional disks to add to the VM that are not part of the VM's OVA template. + items: + description: DataDisk defines a data disk to add to the VM that is not part of the VM OVA template. + properties: + name: + description: |- + name is used to identify the disk definition. name is required needs to be unique so that it can be used to + clearly identify purpose of the disk. + example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + provisioningMode: + description: |- + provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. + Allowed values are "Thin", "Thick", "EagerlyZeroed", and omitted. + When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. + When set to Thick, the full disk size will be allocated when disk is created. + When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. Such disks may take longer time during creation compared to other disk formats. + When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere. + enum: + - Thin + - Thick + - EagerlyZeroed + type: string + sizeGiB: + description: |- + sizeGiB is the size of the disk in GiB. + The maximum supported size is 16384 GiB. + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + memoryMB: + description: Memory is the size of a VM's memory in MB. + format: int64 + type: integer + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskSizeGB: + description: DiskSizeGB defines the size of disk in GB. + format: int32 + type: integer + type: object + zones: + description: |- + Zones defines available zones + Zones is available in TechPreview. 
+ items: + type: string + type: array + type: object + diskType: + description: |- + DiskType is the name of the disk provisioning type, + valid values are thin, thick, and eagerZeroedThick. When not + specified, it will be set according to the default storage policy + of vsphere. + enum: + - "" + - thin + - thick + - eagerZeroedThick + type: string + failureDomains: + description: |- + FailureDomains holds the VSpherePlatformFailureDomainSpec which contains + the definition of region, zone and the vCenter topology. + If this is omitted failure domains (regions and zones) will not be used. + items: + description: |- + FailureDomain holds the region and zone failure domain and + the vCenter topology of that failure domain. + properties: + name: + description: |- + name defines the name of the FailureDomain + This name is arbitrary but will be used + in VSpherePlatformDeploymentZone for association. + maxLength: 256 + minLength: 1 + type: string + region: + description: |- + The region is the name of the tag in vCenter that is associated with the + tag category `openshift-region`. The region name must match the tag name + and must exist prior to installation. When the regionType is Datacenter + the tag must be attached to the toplogy.datacenter object in vCenter. + When the regionType is ComputeCluster the tag must be attached to the topology.computeCluster + object in vCenter. + type: string + regionType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - Datacenter + - ComputeCluster + description: |- + regionType is the type of failure domain region, the current values are "Datacenter" and "ComputeCluster" + When regionType is Datacenter the zoneType must be ComputeCluster. + When regionType is ComputeCluster the zoneType must be HostGroup + type: string + server: + description: server is the fully-qualified domain name or the IP address of the vCenter server. + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain using vSphere constructs + properties: + computeCluster: + description: |- + computeCluster as the failure domain + This is required to be a path + maxLength: 2048 + minLength: 1 + type: string + datacenter: + description: |- + datacenter is the vCenter datacenter in which virtual machines will be located + and defined as the failure domain. + maxLength: 80 + minLength: 1 + type: string + datastore: + description: |- + datastore is the name or inventory path of the datastore in which the + virtual machine is created/located. + maxLength: 2048 + minLength: 1 + type: string + folder: + description: |- + folder is the inventory path of the folder in which the + virtual machine is created/located. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the ZoneType is HostGroup + maxLength: 80 + type: string + networks: + description: networks is the list of networks within this failure domain + items: + type: string + maxItems: 10 + minItems: 1 + type: array + resourcePool: + description: |- + resourcePool is the absolute path of the resource pool where virtual machines will be + created. The absolute path is of the form //host//Resources/. 
+ maxLength: 2048 + minLength: 1 + pattern: ^/.*?/host/.*?/Resources.* + type: string + tagIDs: + description: |- + tagIDs is an optional set of tags to add to an instance. Specified tagIDs + must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified. + example: urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL + items: + type: string + type: array + template: + description: |- + template is the inventory path of the virtual machine or template + that will be used for cloning. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: |- + The zone is the name of the tag in vCenter that is associated with + the tag category `openshift-zone`. The zone name must match the tag name + and must exist prior to installation. When zoneType is HostGroup the + ESXi hosts defined in the provided in the topology.hostGroup field must be tagged. + When the zoneType is ComputeCluster the tag must be attached to the topology.computeCluster + object in vCenter. + type: string + zoneType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - ComputeCluster + - HostGroup + description: |- + When zoneType is ComputeCluster the regionType must be Datacenter + When zoneType is HostGroup the regionType must be ComputeCluster + If the zoneType is HostGroup topology.hostGroup must be defined and exist in vCenter + prior to installation. + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + folder: + description: |- + Folder is the absolute path of the folder that will be used and/or created for + virtual machines. The absolute path is of the form //vm//. + Deprecated: Use FailureDomains.Topology.Folder + pattern: ^/.*?/vm/.*? + type: string + hosts: + description: Hosts defines network configurations to be applied by the installer. Hosts is available in TechPreview. + items: + description: Host defines host VMs to generate as part of the installation. + properties: + failureDomain: + description: FailureDomain refers to the name of a FailureDomain as described in https://github.com/openshift/enhancements/blob/master/enhancements/installer/vsphere-ipi-zonal.md + type: string + networkDevice: + description: NetworkDeviceSpec to be applied to the host + properties: + gateway: + description: |- + gateway is an IPv4 or IPv6 address which represents the subnet gateway, + for example, 192.168.1.1. + format: ipv6 + type: string + ipAddrs: + description: |- + ipAddrs is a list of one or more IPv4 and/or IPv6 addresses and CIDR to assign to + this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are + intended to allow explicit assignment of a machine's IP address. + example: 2001:DB8:0000:0000:244:17FF:FEB6:D37D/64 + format: ipv6 + items: + type: string + type: array + nameservers: + description: |- + nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example, + 8.8.8.8. a nameserver is not provided by a fulfilled IPAddressClaim. If DHCP is not the + source of IP addresses for this network device, nameservers should include a valid nameserver. 
+ example: 8.8.8.8 + format: ipv6 + items: + type: string + type: array + required: + - ipAddrs + type: object + role: + description: Role defines the role of the node + enum: + - "" + - bootstrap + - control-plane + - compute + type: string + required: + - networkDevice + - role + type: object + type: array + ingressVIP: + description: |- + DeprecatedIngressVIP is the virtual IP address for ingress + Deprecated: Use IngressVIPs + type: string + ingressVIPs: + description: |- + IngressVIPs contains the VIP(s) for ingress. In dual stack clusters it + contains an IPv4 and IPv6 address, otherwise only one VIP + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + description: |- + LoadBalancer defines how the load balancer used by the cluster is configured. + LoadBalancer is available in TechPreview. + properties: + type: + default: OpenShiftManagedDefault + description: |- + type defines the type of load balancer used by the cluster on VSphere platform + which can be a user-managed or openshift-managed load balancer + that is to be used for the OpenShift API and Ingress endpoints. + When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + defined in the machine config operator will be deployed. + When set to UserManaged these static pods will not be deployed and it is expected that + the load balancer is configured out of band by the deployer. + When omitted, this means no opinion and the platform is left to choose a reasonable default. + The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + network: + description: |- + Network specifies the name of the network to be used by the cluster. + Deprecated: Use FailureDomains.Topology.Network + type: string + nodeNetworking: + description: |- + nodeNetworking contains the definition of internal and external network constraints for + assigning the node's networking. + If this field is omitted, networking defaults to the legacy + address selection behavior which is to only support a single address and + return the first one found. + properties: + external: + description: external represents the network configuration of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: |- + excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for use in the status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: |- + network VirtualMachine's VM Network names that will be used to when searching + for status.addresses fields. Note that if internal.networkSubnetCIDR and + external.networkSubnetCIDR are not set, then the vNIC associated to this network must + only have a single IP address assigned to it. + The available networks (port groups) can be listed using + `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: |- + networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs + that will be used in respective status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration of the node that is routable only within the cluster. 
+ properties: + excludeNetworkSubnetCidr: + description: |- + excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for use in the status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: |- + network VirtualMachine's VM Network names that will be used to when searching + for status.addresses fields. Note that if internal.networkSubnetCIDR and + external.networkSubnetCIDR are not set, then the vNIC associated to this network must + only have a single IP address assigned to it. + The available networks (port groups) can be listed using + `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: |- + networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs + that will be used in respective status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + password: + description: |- + Password is the password for the user to use to connect to the vCenter. + Deprecated: Use VCenters.Password + type: string + resourcePool: + description: |- + ResourcePool is the absolute path of the resource pool where virtual machines will be + created. The absolute path is of the form //host//Resources/. + Deprecated: Use FailureDomains.Topology.ResourcePool + type: string + username: + description: |- + Username is the name of the user to use to connect to the vCenter. + Deprecated: Use VCenters.Username + type: string + vCenter: + description: |- + VCenter is the domain name or IP address of the vCenter. + Deprecated: Use VCenters.Server + type: string + vcenters: + description: VCenters holds the connection details for services to communicate with vCenter. + items: + description: |- + VCenter stores the vCenter connection fields + https://github.com/kubernetes/cloud-provider-vsphere/blob/master/pkg/common/config/types_yaml.go + properties: + datacenters: + description: Datacenter in which VMs are located. + items: + type: string + minItems: 1 + type: array + password: + description: Password is the password for the user to use to connect to the vCenter. + type: string + port: + default: 443 + description: |- + port is the TCP port that will be used to communicate to + the vCenter endpoint. This is typically unchanged from + the default of HTTPS TCP/443. + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + description: server is the fully-qualified domain name or the IP address of the vCenter server. + maxLength: 255 + type: string + user: + description: Username is the username that will be used to connect to vCenter + type: string + required: + - datacenters + - password + - server + - user + type: object + maxItems: 3 + minItems: 1 + type: array + type: object network: - description: Network specifies the name of the network to be used by the cluster. + description: |- + Network specifies the name of the network to be used by the cluster. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string vCenter: - description: VCenter is the domain name or IP address of the vCenter. + description: |- + VCenter is the domain name or IP address of the vCenter. 
+ Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string required: - certificatesSecretRef - credentialsSecretRef - - datacenter - - defaultDatastore - - vCenter type: object type: object powerState: diff --git a/config/crds/hive.openshift.io_clusterdeprovisions.yaml b/config/crds/hive.openshift.io_clusterdeprovisions.yaml index 5277fd6bbfe..b5518cff042 100644 --- a/config/crds/hive.openshift.io_clusterdeprovisions.yaml +++ b/config/crds/hive.openshift.io_clusterdeprovisions.yaml @@ -337,12 +337,20 @@ spec: type: object x-kubernetes-map-type: atomic vCenter: - description: VCenter is the vSphere vCenter hostname. + description: |- + VCenter is the vSphere vCenter hostname. + Deprecated: use VCenters instead type: string + vCenters: + description: VCenters are potentially multiple vCenter hostnames. Prefer this field over VCenter. + items: + type: string + type: array required: - certificatesSecretRef - credentialsSecretRef - vCenter + - vCenters type: object type: object required: diff --git a/config/crds/hive.openshift.io_clusterpools.yaml b/config/crds/hive.openshift.io_clusterpools.yaml index 4509176774d..b47b3660df3 100644 --- a/config/crds/hive.openshift.io_clusterpools.yaml +++ b/config/crds/hive.openshift.io_clusterpools.yaml @@ -866,7 +866,10 @@ spec: type: object x-kubernetes-map-type: atomic cluster: - description: Cluster is the name of the cluster virtual machines will be cloned into. + description: |- + Cluster is the name of the cluster virtual machines will be cloned into. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string credentialsSecretRef: description: |- @@ -885,28 +888,554 @@ spec: type: object x-kubernetes-map-type: atomic datacenter: - description: Datacenter is the name of the datacenter to use in the vCenter. + description: |- + Datacenter is the name of the datacenter to use in the vCenter. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string defaultDatastore: - description: DefaultDatastore is the default datastore to use for provisioning volumes. + description: |- + DefaultDatastore is the default datastore to use for provisioning volumes. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string folder: description: |- Folder is the name of the folder that will be used and/or created for virtual machines. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string + infrastructure: + description: Infrastructure is the desired state of the vSphere infrastructure provider. + properties: + apiVIP: + description: |- + DeprecatedAPIVIP is the virtual IP address for the api endpoint + Deprecated: Use APIVIPs + type: string + apiVIPs: + description: |- + APIVIPs contains the VIP(s) for the api endpoint. In dual stack clusters + it contains an IPv4 and IPv6 address, otherwise only one VIP + format: ip + items: + type: string + maxItems: 2 + type: array + cluster: + description: |- + Cluster is the name of the cluster virtual machines will be cloned into. + Deprecated: Use FailureDomains.Topology.Cluster + type: string + clusterOSImage: + description: ClusterOSImage overrides the url provided in rhcos.json to download the RHCOS OVA + type: string + datacenter: + description: |- + Datacenter is the name of the datacenter to use in the vCenter. 
+ Deprecated: Use FailureDomains.Topology.Datacenter + type: string + defaultDatastore: + description: |- + DefaultDatastore is the default datastore to use for provisioning volumes. + Deprecated: Use FailureDomains.Topology.Datastore + type: string + defaultMachinePlatform: + description: |- + DefaultMachinePlatform is the default configuration used when + installing on VSphere for machine pools which do not define their own + platform configuration. + properties: + coresPerSocket: + description: |- + NumCoresPerSocket is the number of cores per socket in a vm. The number + of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. + format: int32 + type: integer + cpus: + description: NumCPUs is the total number of virtual processor cores to assign a vm. + format: int32 + type: integer + dataDisks: + description: DataDisks are additional disks to add to the VM that are not part of the VM's OVA template. + items: + description: DataDisk defines a data disk to add to the VM that is not part of the VM OVA template. + properties: + name: + description: |- + name is used to identify the disk definition. name is required needs to be unique so that it can be used to + clearly identify purpose of the disk. + example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + provisioningMode: + description: |- + provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. + Allowed values are "Thin", "Thick", "EagerlyZeroed", and omitted. + When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. + When set to Thick, the full disk size will be allocated when disk is created. + When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. Such disks may take longer time during creation compared to other disk formats. + When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere. + enum: + - Thin + - Thick + - EagerlyZeroed + type: string + sizeGiB: + description: |- + sizeGiB is the size of the disk in GiB. + The maximum supported size is 16384 GiB. + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + memoryMB: + description: Memory is the size of a VM's memory in MB. + format: int64 + type: integer + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskSizeGB: + description: DiskSizeGB defines the size of disk in GB. + format: int32 + type: integer + type: object + zones: + description: |- + Zones defines available zones + Zones is available in TechPreview. + items: + type: string + type: array + type: object + diskType: + description: |- + DiskType is the name of the disk provisioning type, + valid values are thin, thick, and eagerZeroedThick. When not + specified, it will be set according to the default storage policy + of vsphere. + enum: + - "" + - thin + - thick + - eagerZeroedThick + type: string + failureDomains: + description: |- + FailureDomains holds the VSpherePlatformFailureDomainSpec which contains + the definition of region, zone and the vCenter topology. 
+ If this is omitted failure domains (regions and zones) will not be used. + items: + description: |- + FailureDomain holds the region and zone failure domain and + the vCenter topology of that failure domain. + properties: + name: + description: |- + name defines the name of the FailureDomain + This name is arbitrary but will be used + in VSpherePlatformDeploymentZone for association. + maxLength: 256 + minLength: 1 + type: string + region: + description: |- + The region is the name of the tag in vCenter that is associated with the + tag category `openshift-region`. The region name must match the tag name + and must exist prior to installation. When the regionType is Datacenter + the tag must be attached to the toplogy.datacenter object in vCenter. + When the regionType is ComputeCluster the tag must be attached to the topology.computeCluster + object in vCenter. + type: string + regionType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - Datacenter + - ComputeCluster + description: |- + regionType is the type of failure domain region, the current values are "Datacenter" and "ComputeCluster" + When regionType is Datacenter the zoneType must be ComputeCluster. + When regionType is ComputeCluster the zoneType must be HostGroup + type: string + server: + description: server is the fully-qualified domain name or the IP address of the vCenter server. + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain using vSphere constructs + properties: + computeCluster: + description: |- + computeCluster as the failure domain + This is required to be a path + maxLength: 2048 + minLength: 1 + type: string + datacenter: + description: |- + datacenter is the vCenter datacenter in which virtual machines will be located + and defined as the failure domain. + maxLength: 80 + minLength: 1 + type: string + datastore: + description: |- + datastore is the name or inventory path of the datastore in which the + virtual machine is created/located. + maxLength: 2048 + minLength: 1 + type: string + folder: + description: |- + folder is the inventory path of the folder in which the + virtual machine is created/located. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. + hostGroup is limited to 80 characters. + This field is required when the ZoneType is HostGroup + maxLength: 80 + type: string + networks: + description: networks is the list of networks within this failure domain + items: + type: string + maxItems: 10 + minItems: 1 + type: array + resourcePool: + description: |- + resourcePool is the absolute path of the resource pool where virtual machines will be + created. The absolute path is of the form //host//Resources/. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/host/.*?/Resources.* + type: string + tagIDs: + description: |- + tagIDs is an optional set of tags to add to an instance. Specified tagIDs + must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified. + example: urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL + items: + type: string + type: array + template: + description: |- + template is the inventory path of the virtual machine or template + that will be used for cloning. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? 
+ type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: |- + The zone is the name of the tag in vCenter that is associated with + the tag category `openshift-zone`. The zone name must match the tag name + and must exist prior to installation. When zoneType is HostGroup the + ESXi hosts defined in the provided in the topology.hostGroup field must be tagged. + When the zoneType is ComputeCluster the tag must be attached to the topology.computeCluster + object in vCenter. + type: string + zoneType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - ComputeCluster + - HostGroup + description: |- + When zoneType is ComputeCluster the regionType must be Datacenter + When zoneType is HostGroup the regionType must be ComputeCluster + If the zoneType is HostGroup topology.hostGroup must be defined and exist in vCenter + prior to installation. + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + folder: + description: |- + Folder is the absolute path of the folder that will be used and/or created for + virtual machines. The absolute path is of the form //vm//. + Deprecated: Use FailureDomains.Topology.Folder + pattern: ^/.*?/vm/.*? + type: string + hosts: + description: Hosts defines network configurations to be applied by the installer. Hosts is available in TechPreview. + items: + description: Host defines host VMs to generate as part of the installation. + properties: + failureDomain: + description: FailureDomain refers to the name of a FailureDomain as described in https://github.com/openshift/enhancements/blob/master/enhancements/installer/vsphere-ipi-zonal.md + type: string + networkDevice: + description: NetworkDeviceSpec to be applied to the host + properties: + gateway: + description: |- + gateway is an IPv4 or IPv6 address which represents the subnet gateway, + for example, 192.168.1.1. + format: ipv6 + type: string + ipAddrs: + description: |- + ipAddrs is a list of one or more IPv4 and/or IPv6 addresses and CIDR to assign to + this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are + intended to allow explicit assignment of a machine's IP address. + example: 2001:DB8:0000:0000:244:17FF:FEB6:D37D/64 + format: ipv6 + items: + type: string + type: array + nameservers: + description: |- + nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example, + 8.8.8.8. a nameserver is not provided by a fulfilled IPAddressClaim. If DHCP is not the + source of IP addresses for this network device, nameservers should include a valid nameserver. + example: 8.8.8.8 + format: ipv6 + items: + type: string + type: array + required: + - ipAddrs + type: object + role: + description: Role defines the role of the node + enum: + - "" + - bootstrap + - control-plane + - compute + type: string + required: + - networkDevice + - role + type: object + type: array + ingressVIP: + description: |- + DeprecatedIngressVIP is the virtual IP address for ingress + Deprecated: Use IngressVIPs + type: string + ingressVIPs: + description: |- + IngressVIPs contains the VIP(s) for ingress. In dual stack clusters it + contains an IPv4 and IPv6 address, otherwise only one VIP + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + description: |- + LoadBalancer defines how the load balancer used by the cluster is configured. + LoadBalancer is available in TechPreview. 
+ properties: + type: + default: OpenShiftManagedDefault + description: |- + type defines the type of load balancer used by the cluster on VSphere platform + which can be a user-managed or openshift-managed load balancer + that is to be used for the OpenShift API and Ingress endpoints. + When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + defined in the machine config operator will be deployed. + When set to UserManaged these static pods will not be deployed and it is expected that + the load balancer is configured out of band by the deployer. + When omitted, this means no opinion and the platform is left to choose a reasonable default. + The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + network: + description: |- + Network specifies the name of the network to be used by the cluster. + Deprecated: Use FailureDomains.Topology.Network + type: string + nodeNetworking: + description: |- + nodeNetworking contains the definition of internal and external network constraints for + assigning the node's networking. + If this field is omitted, networking defaults to the legacy + address selection behavior which is to only support a single address and + return the first one found. + properties: + external: + description: external represents the network configuration of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: |- + excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for use in the status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: |- + network VirtualMachine's VM Network names that will be used to when searching + for status.addresses fields. Note that if internal.networkSubnetCIDR and + external.networkSubnetCIDR are not set, then the vNIC associated to this network must + only have a single IP address assigned to it. + The available networks (port groups) can be listed using + `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: |- + networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs + that will be used in respective status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: |- + excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for use in the status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: |- + network VirtualMachine's VM Network names that will be used to when searching + for status.addresses fields. Note that if internal.networkSubnetCIDR and + external.networkSubnetCIDR are not set, then the vNIC associated to this network must + only have a single IP address assigned to it. 
+ The available networks (port groups) can be listed using + `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: |- + networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs + that will be used in respective status.addresses fields. + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + password: + description: |- + Password is the password for the user to use to connect to the vCenter. + Deprecated: Use VCenters.Password + type: string + resourcePool: + description: |- + ResourcePool is the absolute path of the resource pool where virtual machines will be + created. The absolute path is of the form //host//Resources/. + Deprecated: Use FailureDomains.Topology.ResourcePool + type: string + username: + description: |- + Username is the name of the user to use to connect to the vCenter. + Deprecated: Use VCenters.Username + type: string + vCenter: + description: |- + VCenter is the domain name or IP address of the vCenter. + Deprecated: Use VCenters.Server + type: string + vcenters: + description: VCenters holds the connection details for services to communicate with vCenter. + items: + description: |- + VCenter stores the vCenter connection fields + https://github.com/kubernetes/cloud-provider-vsphere/blob/master/pkg/common/config/types_yaml.go + properties: + datacenters: + description: Datacenter in which VMs are located. + items: + type: string + minItems: 1 + type: array + password: + description: Password is the password for the user to use to connect to the vCenter. + type: string + port: + default: 443 + description: |- + port is the TCP port that will be used to communicate to + the vCenter endpoint. This is typically unchanged from + the default of HTTPS TCP/443. + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + description: server is the fully-qualified domain name or the IP address of the vCenter server. + maxLength: 255 + type: string + user: + description: Username is the username that will be used to connect to vCenter + type: string + required: + - datacenters + - password + - server + - user + type: object + maxItems: 3 + minItems: 1 + type: array + type: object network: - description: Network specifies the name of the network to be used by the cluster. + description: |- + Network specifies the name of the network to be used by the cluster. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string vCenter: - description: VCenter is the domain name or IP address of the vCenter. + description: |- + VCenter is the domain name or IP address of the vCenter. + Deprecated: Please use Platform.Infrastructure instead + See also: Platform.ConvertDeprecatedFields type: string required: - certificatesSecretRef - credentialsSecretRef - - datacenter - - defaultDatastore - - vCenter type: object type: object pullSecretRef: diff --git a/config/crds/hive.openshift.io_machinepools.yaml b/config/crds/hive.openshift.io_machinepools.yaml index f2e179f4d3d..ff6fbe7fad5 100644 --- a/config/crds/hive.openshift.io_machinepools.yaml +++ b/config/crds/hive.openshift.io_machinepools.yaml @@ -770,6 +770,49 @@ spec: description: NumCPUs is the total number of virtual processor cores to assign a vm. format: int32 type: integer + dataDisks: + description: DataDisks are additional disks to add to the VM that are not part of the VM's OVA template. 
+ items: + description: DataDisk defines a data disk to add to the VM that is not part of the VM OVA template. + properties: + name: + description: |- + name is used to identify the disk definition. name is required needs to be unique so that it can be used to + clearly identify purpose of the disk. + example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + provisioningMode: + description: |- + provisioningMode is an optional field that specifies the provisioning type to be used by this vSphere data disk. + Allowed values are "Thin", "Thick", "EagerlyZeroed", and omitted. + When set to Thin, the disk will be made using thin provisioning allocating the bare minimum space. + When set to Thick, the full disk size will be allocated when disk is created. + When set to EagerlyZeroed, the disk will be created using eager zero provisioning. An eager zeroed thick disk has all space allocated and wiped clean of any previous contents on the physical media at creation time. Such disks may take longer time during creation compared to other disk formats. + When omitted, no setting will be applied to the data disk and the provisioning mode for the disk will be determined by the default storage policy configured for the datastore in vSphere. + enum: + - Thin + - Thick + - EagerlyZeroed + type: string + sizeGiB: + description: |- + sizeGiB is the size of the disk in GiB. + The maximum supported size is 16384 GiB. + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map memoryMB: description: Memory is the size of a VM's memory in MB. format: int64 @@ -781,25 +824,106 @@ spec: description: DiskSizeGB defines the size of disk in GB. format: int32 type: integer - required: - - diskSizeGB type: object resourcePool: description: |- ResourcePool is the name of the resource pool that will be used for virtual machines. If it is not present, a default value will be used. + Deprecated: use Topology instead type: string tagIDs: - description: TagIDs is a list of up to 10 tags to add to the VMs that this machine set provisions in vSphere. + description: |- + TagIDs is a list of up to 10 tags to add to the VMs that this machine set provisions in vSphere. + Deprecated: use Topology instead items: type: string maxItems: 10 type: array - required: - - coresPerSocket - - cpus - - memoryMB - - osDisk + topology: + description: |- + Topology is the vSphere topology that will be used for virtual machines. + If it is not present, a default value will be used. + properties: + computeCluster: + description: |- + computeCluster as the failure domain + This is required to be a path + maxLength: 2048 + minLength: 1 + type: string + datacenter: + description: |- + datacenter is the vCenter datacenter in which virtual machines will be located + and defined as the failure domain. + maxLength: 80 + minLength: 1 + type: string + datastore: + description: |- + datastore is the name or inventory path of the datastore in which the + virtual machine is created/located. + maxLength: 2048 + minLength: 1 + type: string + folder: + description: |- + folder is the inventory path of the folder in which the + virtual machine is created/located. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + hostGroup: + description: |- + hostGroup is the name of the vm-host group of type host within vCenter for this failure domain. 
+ hostGroup is limited to 80 characters. + This field is required when the ZoneType is HostGroup + maxLength: 80 + type: string + networks: + description: networks is the list of networks within this failure domain + items: + type: string + maxItems: 10 + minItems: 1 + type: array + resourcePool: + description: |- + resourcePool is the absolute path of the resource pool where virtual machines will be + created. The absolute path is of the form //host//Resources/. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/host/.*?/Resources.* + type: string + tagIDs: + description: |- + tagIDs is an optional set of tags to add to an instance. Specified tagIDs + must use URN-notation instead of display names. A maximum of 10 tag IDs may be specified. + example: urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL + items: + type: string + type: array + template: + description: |- + template is the inventory path of the virtual machine or template + that will be used for cloning. + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zones: + description: |- + Zones defines available zones + Zones is available in TechPreview. + items: + type: string + type: array type: object type: object replicas: diff --git a/contrib/pkg/createcluster/create.go b/contrib/pkg/createcluster/create.go index 8a9db9c3e54..ab20c76535b 100644 --- a/contrib/pkg/createcluster/create.go +++ b/contrib/pkg/createcluster/create.go @@ -76,9 +76,6 @@ GOVC_USERNAME and GOVC_PASSWORD - Are used to determine your vSphere credentials. GOVC_TLS_CA_CERTS - Is used to provide CA certificates for communicating with the vSphere API. -GOVC_NETWORK, GOVC_DATACENTER, GOVC_DATASTORE and GOVC_HOST (vCenter host) -can be used as alternatives to the associated commandline argument. -These are only relevant for creating a cluster on vSphere. IC_API_KEY - Used to determine your IBM Cloud API key. Required when using --cloud=ibmcloud. 
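The hunks below drop the individual `--vsphere-vcenter`/`--vsphere-datacenter`/`--vsphere-network`-style flags from `hiveutil create-cluster` and consume a single installer platform spec instead (via `--vsphere-platform-spec-json` or the `VSPHERE_INSTALLER_PLATFORM_SPEC_JSON` environment variable). As a reading aid, here is a minimal standalone sketch of that consumption path, assuming the same `github.com/openshift/installer/pkg/types/vsphere` Platform type and the `GOVC_USERNAME`/`GOVC_PASSWORD` environment variables this patch already relies on; the `main` wrapper and its output are illustrative only and not part of the change.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	installervsphere "github.com/openshift/installer/pkg/types/vsphere"
)

func main() {
	// The whole vSphere layout (vCenters, failure domains, networks, ...)
	// arrives as one JSON blob instead of many separate flags.
	raw := os.Getenv("VSPHERE_INSTALLER_PLATFORM_SPEC_JSON")

	platform := installervsphere.Platform{}
	if err := json.Unmarshal([]byte(raw), &platform); err != nil {
		fmt.Fprintf(os.Stderr, "error decoding platform spec: %v\n", err)
		os.Exit(1)
	}

	// Credentials are still taken from GOVC_USERNAME/GOVC_PASSWORD and are
	// copied into any vCenter entry that does not set them explicitly,
	// mirroring the loop added to create.go below.
	for i := range platform.VCenters {
		if platform.VCenters[i].Username == "" {
			platform.VCenters[i].Username = os.Getenv("GOVC_USERNAME")
		}
		if platform.VCenters[i].Password == "" {
			platform.VCenters[i].Password = os.Getenv("GOVC_PASSWORD")
		}
	}

	fmt.Printf("decoded %d vCenter(s) and %d failure domain(s)\n",
		len(platform.VCenters), len(platform.FailureDomains))
}
```

The design choice visible in the diff is the same as in this sketch: the JSON blob is decoded directly into the installer's own `vsphere.Platform` type, and only credentials and the API/Ingress VIPs are filled in afterwards, so hiveutil no longer needs to reconstruct datacenter, datastore, folder, cluster, or network values from individual flags.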
@@ -199,14 +196,8 @@ type Options struct { OpenStackIngressFloatingIP string // VSphere - VSphereVCenter string - VSphereDatacenter string - VSphereDefaultDataStore string - VSphereFolder string - VSphereCluster string VSphereAPIVIP string VSphereIngressVIP string - VSphereNetwork string VSpherePlatformSpecJSON string VSphereCACerts string @@ -257,7 +248,7 @@ create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=azure --azure-base-domain-resourc create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=gcp create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=ibmcloud --region="us-east" --base-domain=ibm.hive.openshift.com --manifests=/manifests --credentials-mode-manual create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=openstack --openstack-api-floating-ip=192.168.1.2 --openstack-cloud=mycloud -create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=vsphere --vsphere-vcenter=vmware.devcluster.com --vsphere-datacenter=dc1 --vsphere-default-datastore=nvme-ds1 --vsphere-api-vip=192.168.1.2 --vsphere-ingress-vip=192.168.1.3 --vsphere-cluster=devel --vsphere-network="VM Network" --vsphere-ca-certs=/path/to/cert`, +create-cluster CLUSTER_DEPLOYMENT_NAME --cloud=vsphere --vsphere-api-vip=192.168.1.2 --vsphere-ingress-vip=192.168.1.3 --vsphere-ca-certs=/path/to/cert --vsphere-platform-spec-json="{}"`, Short: "Creates a new Hive cluster deployment", Long: fmt.Sprintf(longDesc, defaultSSHPublicKeyFile, defaultPullSecretFile), Args: cobra.ExactArgs(1), @@ -363,14 +354,8 @@ OpenShift Installer publishes all the services of the cluster like API server an flags.StringVar(&opt.OpenStackIngressFloatingIP, "openstack-ingress-floating-ip", "", "Floating IP address to use for cluster's Ingress service") // vSphere flags - flags.StringVar(&opt.VSphereVCenter, "vsphere-vcenter", "", "Domain name or IP address of the vCenter") - flags.StringVar(&opt.VSphereDatacenter, "vsphere-datacenter", "", "Datacenter to use in the vCenter") - flags.StringVar(&opt.VSphereDefaultDataStore, "vsphere-default-datastore", "", "Default datastore to use for provisioning volumes") - flags.StringVar(&opt.VSphereFolder, "vsphere-folder", "", "Folder that will be used and/or created for virtual machines") - flags.StringVar(&opt.VSphereCluster, "vsphere-cluster", "", "Cluster virtual machines will be cloned into") flags.StringVar(&opt.VSphereAPIVIP, "vsphere-api-vip", "", "Virtual IP address for the api endpoint") flags.StringVar(&opt.VSphereIngressVIP, "vsphere-ingress-vip", "", "Virtual IP address for ingress application routing") - flags.StringVar(&opt.VSphereNetwork, "vsphere-network", "", "Name of the network to be used by the cluster") flags.StringVar(&opt.VSpherePlatformSpecJSON, "vsphere-platform-spec-json", "", "Installer vsphere platform spec, encoded as JSON") flags.StringVar(&opt.VSphereCACerts, "vsphere-ca-certs", "", "Path to vSphere CA certificate, multiple CA paths can be : delimited") @@ -761,77 +746,37 @@ func (o *Options) GenerateObjects() ([]runtime.Object, error) { caCerts = append(caCerts, caCert) } - vSphereNetwork := os.Getenv(constants.VSphereNetworkEnvVar) - if o.VSphereNetwork != "" { - vSphereNetwork = o.VSphereNetwork - } - - vSphereDatacenter := os.Getenv(constants.VSphereDataCenterEnvVar) - if o.VSphereDatacenter != "" { - vSphereDatacenter = o.VSphereDatacenter - } - - vSphereDatastore := os.Getenv(constants.VSphereDataStoreEnvVar) - if o.VSphereDefaultDataStore != "" { - vSphereDatastore = o.VSphereDefaultDataStore - } - - vSphereVCenter := os.Getenv(constants.VSphereVCenterEnvVar) - if o.VSphereVCenter != "" { - vSphereVCenter = 
o.VSphereVCenter - } - - vSphereFolder := o.VSphereFolder - vSphereCluster := o.VSphereCluster - vSphereAPIVIP := o.VSphereAPIVIP - vSphereIngressVIP := o.VSphereIngressVIP - platformBytes := []byte(os.Getenv(constants.VSpherePlatformSpecJSONEnvVar)) if o.VSpherePlatformSpecJSON != "" { platformBytes = []byte(o.VSpherePlatformSpecJSON) } + platform := installervsphere.Platform{} + err = json.Unmarshal(platformBytes, &platform) + if err != nil { + return nil, fmt.Errorf("error decoding platform %s: %w", o.VSpherePlatformSpecJSON, err) + } - if len(platformBytes) > 0 { - o.log.Info("using provided installer platform spec instead of other flags for vsphere (size: %v)", len(platformBytes)) - platform := installervsphere.Platform{} - err = json.Unmarshal(platformBytes, &platform) - if err != nil { - return nil, fmt.Errorf("error decoding platform %s: %w", o.VSpherePlatformSpecJSON, err) + for i := range platform.VCenters { + if platform.VCenters[i].Username == "" { + platform.VCenters[i].Username = vsphereUsername } - - vSphereVCenter = platform.VCenters[0].Server - vSphereDatacenter = platform.VCenters[0].Datacenters[0] - if vSphereDatacenter == "" { - vSphereDatacenter = platform.FailureDomains[0].Topology.Datacenter + if platform.VCenters[i].Password == "" { + platform.VCenters[i].Password = vspherePassword } - vSphereDatastore = platform.FailureDomains[0].Topology.Datastore - vSphereFolder = platform.FailureDomains[0].Topology.Folder - vSphereCluster = platform.FailureDomains[0].Topology.ComputeCluster - vSphereNetwork = platform.FailureDomains[0].Topology.Networks[0] } - if vSphereDatacenter == "" { - return nil, fmt.Errorf("must provide --vsphere-datacenter or set %s env var", constants.VSphereDataCenterEnvVar) - } - if vSphereDatastore == "" { - return nil, fmt.Errorf("must provide --vsphere-default-datastore or set %s env var", constants.VSphereDataStoreEnvVar) + if len(platform.APIVIPs) == 0 { + platform.APIVIPs = []string{o.VSphereAPIVIP} } - if vSphereVCenter == "" { - return nil, fmt.Errorf("must provide --vsphere-vcenter or set %s env var", constants.VSphereVCenterEnvVar) + if len(platform.IngressVIPs) == 0 { + platform.IngressVIPs = []string{o.VSphereIngressVIP} } vsphereProvider := &clusterresource.VSphereCloudBuilder{ - VCenter: vSphereVCenter, - Username: vsphereUsername, - Password: vspherePassword, - Datacenter: vSphereDatacenter, - DefaultDatastore: vSphereDatastore, - Folder: vSphereFolder, - Cluster: vSphereCluster, - APIVIP: vSphereAPIVIP, - IngressVIP: vSphereIngressVIP, - Network: vSphereNetwork, - CACert: bytes.Join(caCerts, []byte("\n")), + Username: vsphereUsername, + Password: vspherePassword, + CACert: bytes.Join(caCerts, []byte("\n")), + Infrastructure: &platform, } builder.CloudBuilder = vsphereProvider case cloudIBM: diff --git a/contrib/pkg/deprovision/vsphere.go b/contrib/pkg/deprovision/vsphere.go index 9895ba45e81..5c3fa34bed0 100644 --- a/contrib/pkg/deprovision/vsphere.go +++ b/contrib/pkg/deprovision/vsphere.go @@ -3,6 +3,7 @@ package deprovision import ( "fmt" "os" + "strings" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -90,13 +91,20 @@ func (o *vSphereOptions) Run() error { return err } + var vCenters []typesvsphere.VCenters + for _, vCenter := range strings.Split(o.vCenter, "::") { + vCenters = append(vCenters, typesvsphere.VCenters{ + VCenter: vCenter, + Username: o.username, + Password: o.password, + }) + } + metadata := &types.ClusterMetadata{ InfraID: o.infraID, ClusterPlatformMetadata: types.ClusterPlatformMetadata{ VSphere: 
&typesvsphere.Metadata{ - VCenter: o.vCenter, - Username: o.username, - Password: o.password, + VCenters: vCenters, }, }, } diff --git a/docs/hiveutil.md b/docs/hiveutil.md index 2fdadacdfa7..c4930c60025 100644 --- a/docs/hiveutil.md +++ b/docs/hiveutil.md @@ -64,20 +64,27 @@ NOTE: For deprovisioning a cluster, `hiveutil` will use creds from `~/.gcp/osSer Set credentials/connection information in the following environment variables. `GOVC_USERNAME` should hold the vSphere username, `GOVC_PASSWORD` should be set to the vSphere user's password. If the vCenter instance is using self-signed certificates or is otherwise untrusted by the system being used to connect to vCenter, `GOVC_TLS_CA_CERTS` should be set to the path of a file containing the CA certificate for the vCenter instance. +The `VSPHERE_INSTALLER_PLATFORM_SPEC_JSON` environment variable should hold a JSON (NOT YAML) blob containing a [vSphere platform spec](https://pkg.go.dev/github.com/openshift/installer/pkg/types/vsphere#Platform) ([documented here](https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/installing_on_vmware_vsphere/installer-provisioned-infrastructure#installation-installer-provisioned-vsphere-config-yaml_installing-vsphere-installer-provisioned-customizations)). If you used an older version of `hiveutil` to create vSphere clusters, the parameters it previously accepted individually (network, datastore, etc.) are now specified inside this JSON object. If you have this JSON blob stored as a file on disk, you can load it into an environment variable like so: + +```bash +$ export VSPHERE_INSTALLER_PLATFORM_SPEC_JSON=$( golang.org/x/net v0.38.0 // From installer: This is to force capi back for the older provider version replace sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.3 + +replace github.com/openshift/installer => github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca diff --git a/go.sum b/go.sum index 7cf6671d577..b9aed555411 100644 --- a/go.sum +++ b/go.sum @@ -216,6 +216,8 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/logrusr/v4 v4.1.0 h1:uZNPbwusB0eUXlO8hIUwStE6Lr5bLN6IgYgG+75kuh4= +github.com/bombsimon/logrusr/v4 v4.1.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= @@ -283,6 +285,8 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca h1:HNAloxQEJCg0ssd3xBbdslsQR7F9bIUvC2GPdZbnfnI= +github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca/go.mod h1:O+iZiJnRcMHIr8tzLvl+I8JCL7CbDYr4DcLOzj1+AXw= github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf h1:A2XbJkAuMMFy/9EftoubSKBUIyiOm6Z8+X5G7QpS6so= github.com/dougm/pretty
v0.0.0-20160325215624-add1dbc86daf/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -787,8 +791,6 @@ github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-202409090 github.com/openshift/cluster-control-plane-machine-set-operator v0.0.0-20240909043600-373ac49835bf/go.mod h1:2fZsjZ3QSPkoMUc8QntXfeBb8AnvW+WIYwwQX8vmgvQ= github.com/openshift/generic-admission-server v1.14.1-0.20250715141119-66c5d0a9c5e6 h1:+2D45NQ1B53nU0Lbsqa6rX9duCcO1df6U1HGCboijEQ= github.com/openshift/generic-admission-server v1.14.1-0.20250715141119-66c5d0a9c5e6/go.mod h1:5gqtfZSOJ7CWfrzPJ5c6B1Ju3r+W4UHIWXuav/+OGJQ= -github.com/openshift/installer v1.4.19-ec5 h1:xiQ3D4DLf170cqfdp4Bo8uqVCyRkfQdjIC1DhXeOfpw= -github.com/openshift/installer v1.4.19-ec5/go.mod h1:xTqIPA65EoaSpL5KZKOcxGcoh9HRs+aTdi3OUeedgKk= github.com/openshift/library-go v0.0.0-20250114132252-af5b21ebad2f h1:inJ2wNKevyuR+7VfBecMJ+HjEsVwbTtw0x3SoZZ24uI= github.com/openshift/library-go v0.0.0-20250114132252-af5b21ebad2f/go.mod h1:Dsex3pPrZ+krgVFZIv21DGzeFvcC0muMvaQe9E/q0uI= github.com/openshift/machine-api-operator v0.2.1-0.20240930121047-57b7917e6140 h1:VKn644y4Ra94L8YZ3JT8ZuOZMEspZt8yoIjHeaRM3pA= diff --git a/hack/app-sre/saas-template.yaml b/hack/app-sre/saas-template.yaml index 6b03e193878..a85ef15a804 100644 --- a/hack/app-sre/saas-template.yaml +++ b/hack/app-sre/saas-template.yaml @@ -2233,8 +2233,12 @@ objects: type: object x-kubernetes-map-type: atomic cluster: - description: Cluster is the name of the cluster virtual + description: 'Cluster is the name of the cluster virtual machines will be cloned into. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string credentialsSecretRef: description: 'CredentialsSecretRef refers to a secret that @@ -2259,33 +2263,750 @@ objects: type: object x-kubernetes-map-type: atomic datacenter: - description: Datacenter is the name of the datacenter to + description: 'Datacenter is the name of the datacenter to use in the vCenter. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string defaultDatastore: - description: DefaultDatastore is the default datastore to - use for provisioning volumes. + description: 'DefaultDatastore is the default datastore + to use for provisioning volumes. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string folder: description: 'Folder is the name of the folder that will be used and/or created for - virtual machines.' + virtual machines. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string + infrastructure: + description: Infrastructure is the desired state of the + vSphere infrastructure provider. + properties: + apiVIP: + description: 'DeprecatedAPIVIP is the virtual IP address + for the api endpoint + + Deprecated: Use APIVIPs' + type: string + apiVIPs: + description: 'APIVIPs contains the VIP(s) for the api + endpoint. In dual stack clusters + + it contains an IPv4 and IPv6 address, otherwise only + one VIP' + format: ip + items: + type: string + maxItems: 2 + type: array + cluster: + description: 'Cluster is the name of the cluster virtual + machines will be cloned into. 
+ + Deprecated: Use FailureDomains.Topology.Cluster' + type: string + clusterOSImage: + description: ClusterOSImage overrides the url provided + in rhcos.json to download the RHCOS OVA + type: string + datacenter: + description: 'Datacenter is the name of the datacenter + to use in the vCenter. + + Deprecated: Use FailureDomains.Topology.Datacenter' + type: string + defaultDatastore: + description: 'DefaultDatastore is the default datastore + to use for provisioning volumes. + + Deprecated: Use FailureDomains.Topology.Datastore' + type: string + defaultMachinePlatform: + description: 'DefaultMachinePlatform is the default + configuration used when + + installing on VSphere for machine pools which do not + define their own + + platform configuration.' + properties: + coresPerSocket: + description: 'NumCoresPerSocket is the number of + cores per socket in a vm. The number + + of vCPUs on the vm will be NumCPUs/NumCoresPerSocket.' + format: int32 + type: integer + cpus: + description: NumCPUs is the total number of virtual + processor cores to assign a vm. + format: int32 + type: integer + dataDisks: + description: DataDisks are additional disks to add + to the VM that are not part of the VM's OVA template. + items: + description: DataDisk defines a data disk to add + to the VM that is not part of the VM OVA template. + properties: + name: + description: 'name is used to identify the + disk definition. name is required needs + to be unique so that it can be used to + + clearly identify purpose of the disk.' + example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + provisioningMode: + description: 'provisioningMode is an optional + field that specifies the provisioning type + to be used by this vSphere data disk. + + Allowed values are "Thin", "Thick", "EagerlyZeroed", + and omitted. + + When set to Thin, the disk will be made + using thin provisioning allocating the bare + minimum space. + + When set to Thick, the full disk size will + be allocated when disk is created. + + When set to EagerlyZeroed, the disk will + be created using eager zero provisioning. + An eager zeroed thick disk has all space + allocated and wiped clean of any previous + contents on the physical media at creation + time. Such disks may take longer time during + creation compared to other disk formats. + + When omitted, no setting will be applied + to the data disk and the provisioning mode + for the disk will be determined by the default + storage policy configured for the datastore + in vSphere.' + enum: + - Thin + - Thick + - EagerlyZeroed + type: string + sizeGiB: + description: 'sizeGiB is the size of the disk + in GiB. + + The maximum supported size is 16384 GiB.' + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + memoryMB: + description: Memory is the size of a VM's memory + in MB. + format: int64 + type: integer + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskSizeGB: + description: DiskSizeGB defines the size of + disk in GB. + format: int32 + type: integer + type: object + zones: + description: 'Zones defines available zones + + Zones is available in TechPreview.' + items: + type: string + type: array + type: object + diskType: + description: 'DiskType is the name of the disk provisioning + type, + + valid values are thin, thick, and eagerZeroedThick. 
+ When not + + specified, it will be set according to the default + storage policy + + of vsphere.' + enum: + - '' + - thin + - thick + - eagerZeroedThick + type: string + failureDomains: + description: 'FailureDomains holds the VSpherePlatformFailureDomainSpec + which contains + + the definition of region, zone and the vCenter topology. + + If this is omitted failure domains (regions and zones) + will not be used.' + items: + description: 'FailureDomain holds the region and zone + failure domain and + + the vCenter topology of that failure domain.' + properties: + name: + description: 'name defines the name of the FailureDomain + + This name is arbitrary but will be used + + in VSpherePlatformDeploymentZone for association.' + maxLength: 256 + minLength: 1 + type: string + region: + description: 'The region is the name of the tag + in vCenter that is associated with the + + tag category `openshift-region`. The region + name must match the tag name + + and must exist prior to installation. When the + regionType is Datacenter + + the tag must be attached to the toplogy.datacenter + object in vCenter. + + When the regionType is ComputeCluster the tag + must be attached to the topology.computeCluster + + object in vCenter.' + type: string + regionType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - Datacenter + - ComputeCluster + description: 'regionType is the type of failure + domain region, the current values are "Datacenter" + and "ComputeCluster" + + When regionType is Datacenter the zoneType must + be ComputeCluster. + + When regionType is ComputeCluster the zoneType + must be HostGroup' + type: string + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure + domain using vSphere constructs + properties: + computeCluster: + description: 'computeCluster as the failure + domain + + This is required to be a path' + maxLength: 2048 + minLength: 1 + type: string + datacenter: + description: 'datacenter is the vCenter datacenter + in which virtual machines will be located + + and defined as the failure domain.' + maxLength: 80 + minLength: 1 + type: string + datastore: + description: 'datastore is the name or inventory + path of the datastore in which the + + virtual machine is created/located.' + maxLength: 2048 + minLength: 1 + type: string + folder: + description: 'folder is the inventory path + of the folder in which the + + virtual machine is created/located.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + hostGroup: + description: 'hostGroup is the name of the + vm-host group of type host within vCenter + for this failure domain. + + hostGroup is limited to 80 characters. + + This field is required when the ZoneType + is HostGroup' + maxLength: 80 + type: string + networks: + description: networks is the list of networks + within this failure domain + items: + type: string + maxItems: 10 + minItems: 1 + type: array + resourcePool: + description: 'resourcePool is the absolute + path of the resource pool where virtual + machines will be + + created. The absolute path is of the form + //host//Resources/.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/host/.*?/Resources.* + type: string + tagIDs: + description: 'tagIDs is an optional set of + tags to add to an instance. Specified tagIDs + + must use URN-notation instead of display + names. 
A maximum of 10 tag IDs may be specified.' + example: urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL + items: + type: string + type: array + template: + description: 'template is the inventory path + of the virtual machine or template + + that will be used for cloning.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: 'The zone is the name of the tag + in vCenter that is associated with + + the tag category `openshift-zone`. The zone + name must match the tag name + + and must exist prior to installation. When zoneType + is HostGroup the + + ESXi hosts defined in the provided in the topology.hostGroup + field must be tagged. + + When the zoneType is ComputeCluster the tag + must be attached to the topology.computeCluster + + object in vCenter.' + type: string + zoneType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - ComputeCluster + - HostGroup + description: 'When zoneType is ComputeCluster + the regionType must be Datacenter + + When zoneType is HostGroup the regionType must + be ComputeCluster + + If the zoneType is HostGroup topology.hostGroup + must be defined and exist in vCenter + + prior to installation.' + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + folder: + description: 'Folder is the absolute path of the folder + that will be used and/or created for + + virtual machines. The absolute path is of the form + //vm//. + + Deprecated: Use FailureDomains.Topology.Folder' + pattern: ^/.*?/vm/.*? + type: string + hosts: + description: Hosts defines network configurations to + be applied by the installer. Hosts is available in + TechPreview. + items: + description: Host defines host VMs to generate as + part of the installation. + properties: + failureDomain: + description: FailureDomain refers to the name + of a FailureDomain as described in https://github.com/openshift/enhancements/blob/master/enhancements/installer/vsphere-ipi-zonal.md + type: string + networkDevice: + description: NetworkDeviceSpec to be applied to + the host + properties: + gateway: + description: 'gateway is an IPv4 or IPv6 address + which represents the subnet gateway, + + for example, 192.168.1.1.' + format: ipv6 + type: string + ipAddrs: + description: 'ipAddrs is a list of one or + more IPv4 and/or IPv6 addresses and CIDR + to assign to + + this device, for example, 192.168.1.100/24. + IP addresses provided via ipAddrs are + + intended to allow explicit assignment of + a machine''s IP address.' + example: 2001:DB8:0000:0000:244:17FF:FEB6:D37D/64 + format: ipv6 + items: + type: string + type: array + nameservers: + description: 'nameservers is a list of IPv4 + and/or IPv6 addresses used as DNS nameservers, + for example, + + 8.8.8.8. a nameserver is not provided by + a fulfilled IPAddressClaim. If DHCP is not + the + + source of IP addresses for this network + device, nameservers should include a valid + nameserver.' 
+ example: 8.8.8.8 + format: ipv6 + items: + type: string + type: array + required: + - ipAddrs + type: object + role: + description: Role defines the role of the node + enum: + - '' + - bootstrap + - control-plane + - compute + type: string + required: + - networkDevice + - role + type: object + type: array + ingressVIP: + description: 'DeprecatedIngressVIP is the virtual IP + address for ingress + + Deprecated: Use IngressVIPs' + type: string + ingressVIPs: + description: 'IngressVIPs contains the VIP(s) for ingress. + In dual stack clusters it + + contains an IPv4 and IPv6 address, otherwise only + one VIP' + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + description: 'LoadBalancer defines how the load balancer + used by the cluster is configured. + + LoadBalancer is available in TechPreview.' + properties: + type: + default: OpenShiftManagedDefault + description: 'type defines the type of load balancer + used by the cluster on VSphere platform + + which can be a user-managed or openshift-managed + load balancer + + that is to be used for the OpenShift API and Ingress + endpoints. + + When set to OpenShiftManagedDefault the static + pods in charge of API and Ingress traffic load-balancing + + defined in the machine config operator will be + deployed. + + When set to UserManaged these static pods will + not be deployed and it is expected that + + the load balancer is configured out of band by + the deployer. + + When omitted, this means no opinion and the platform + is left to choose a reasonable default. + + The default value is OpenShiftManagedDefault.' + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + network: + description: 'Network specifies the name of the network + to be used by the cluster. + + Deprecated: Use FailureDomains.Topology.Network' + type: string + nodeNetworking: + description: 'nodeNetworking contains the definition + of internal and external network constraints for + + assigning the node''s networking. + + If this field is omitted, networking defaults to the + legacy + + address selection behavior which is to only support + a single address and + + return the first one found.' + properties: + external: + description: external represents the network configuration + of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: 'excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + + the IP address from the VirtualMachine''s + VM for use in the status.addresses fields.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: 'network VirtualMachine''s VM Network + names that will be used to when searching + + for status.addresses fields. Note that if + internal.networkSubnetCIDR and + + external.networkSubnetCIDR are not set, then + the vNIC associated to this network must + + only have a single IP address assigned to + it. + + The available networks (port groups) can be + listed using + + `govc ls ''network/*''`' + type: string + networkSubnetCidr: + description: 'networkSubnetCidr IP address on + VirtualMachine''s network interfaces included + in the fields'' CIDRs + + that will be used in respective status.addresses + fields.' 
+ items: + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration + of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: 'excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + + the IP address from the VirtualMachine''s + VM for use in the status.addresses fields.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: 'network VirtualMachine''s VM Network + names that will be used to when searching + + for status.addresses fields. Note that if + internal.networkSubnetCIDR and + + external.networkSubnetCIDR are not set, then + the vNIC associated to this network must + + only have a single IP address assigned to + it. + + The available networks (port groups) can be + listed using + + `govc ls ''network/*''`' + type: string + networkSubnetCidr: + description: 'networkSubnetCidr IP address on + VirtualMachine''s network interfaces included + in the fields'' CIDRs + + that will be used in respective status.addresses + fields.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + password: + description: 'Password is the password for the user + to use to connect to the vCenter. + + Deprecated: Use VCenters.Password' + type: string + resourcePool: + description: 'ResourcePool is the absolute path of the + resource pool where virtual machines will be + + created. The absolute path is of the form //host//Resources/. + + Deprecated: Use FailureDomains.Topology.ResourcePool' + type: string + username: + description: 'Username is the name of the user to use + to connect to the vCenter. + + Deprecated: Use VCenters.Username' + type: string + vCenter: + description: 'VCenter is the domain name or IP address + of the vCenter. + + Deprecated: Use VCenters.Server' + type: string + vcenters: + description: VCenters holds the connection details for + services to communicate with vCenter. + items: + description: 'VCenter stores the vCenter connection + fields + + https://github.com/kubernetes/cloud-provider-vsphere/blob/master/pkg/common/config/types_yaml.go' + properties: + datacenters: + description: Datacenter in which VMs are located. + items: + type: string + minItems: 1 + type: array + password: + description: Password is the password for the + user to use to connect to the vCenter. + type: string + port: + default: 443 + description: 'port is the TCP port that will be + used to communicate to + + the vCenter endpoint. This is typically unchanged + from + + the default of HTTPS TCP/443.' + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + maxLength: 255 + type: string + user: + description: Username is the username that will + be used to connect to vCenter + type: string + required: + - datacenters + - password + - server + - user + type: object + maxItems: 3 + minItems: 1 + type: array + type: object network: - description: Network specifies the name of the network to - be used by the cluster. + description: 'Network specifies the name of the network + to be used by the cluster. 
+ + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string vCenter: - description: VCenter is the domain name or IP address of + description: 'VCenter is the domain name or IP address of the vCenter. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string required: - certificatesSecretRef - credentialsSecretRef - - datacenter - - defaultDatastore - - vCenter type: object type: object powerState: @@ -4016,12 +4737,21 @@ objects: type: object x-kubernetes-map-type: atomic vCenter: - description: VCenter is the vSphere vCenter hostname. + description: 'VCenter is the vSphere vCenter hostname. + + Deprecated: use VCenters instead' type: string + vCenters: + description: VCenters are potentially multiple vCenter hostnames. + Prefer this field over VCenter. + items: + type: string + type: array required: - certificatesSecretRef - credentialsSecretRef - vCenter + - vCenters type: object type: object required: @@ -5315,8 +6045,12 @@ objects: type: object x-kubernetes-map-type: atomic cluster: - description: Cluster is the name of the cluster virtual + description: 'Cluster is the name of the cluster virtual machines will be cloned into. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string credentialsSecretRef: description: 'CredentialsSecretRef refers to a secret that @@ -5341,33 +6075,750 @@ objects: type: object x-kubernetes-map-type: atomic datacenter: - description: Datacenter is the name of the datacenter to + description: 'Datacenter is the name of the datacenter to use in the vCenter. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string defaultDatastore: - description: DefaultDatastore is the default datastore to - use for provisioning volumes. + description: 'DefaultDatastore is the default datastore + to use for provisioning volumes. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string folder: description: 'Folder is the name of the folder that will be used and/or created for - virtual machines.' + virtual machines. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string + infrastructure: + description: Infrastructure is the desired state of the + vSphere infrastructure provider. + properties: + apiVIP: + description: 'DeprecatedAPIVIP is the virtual IP address + for the api endpoint + + Deprecated: Use APIVIPs' + type: string + apiVIPs: + description: 'APIVIPs contains the VIP(s) for the api + endpoint. In dual stack clusters + + it contains an IPv4 and IPv6 address, otherwise only + one VIP' + format: ip + items: + type: string + maxItems: 2 + type: array + cluster: + description: 'Cluster is the name of the cluster virtual + machines will be cloned into. + + Deprecated: Use FailureDomains.Topology.Cluster' + type: string + clusterOSImage: + description: ClusterOSImage overrides the url provided + in rhcos.json to download the RHCOS OVA + type: string + datacenter: + description: 'Datacenter is the name of the datacenter + to use in the vCenter. + + Deprecated: Use FailureDomains.Topology.Datacenter' + type: string + defaultDatastore: + description: 'DefaultDatastore is the default datastore + to use for provisioning volumes. 
+ + Deprecated: Use FailureDomains.Topology.Datastore' + type: string + defaultMachinePlatform: + description: 'DefaultMachinePlatform is the default + configuration used when + + installing on VSphere for machine pools which do not + define their own + + platform configuration.' + properties: + coresPerSocket: + description: 'NumCoresPerSocket is the number of + cores per socket in a vm. The number + + of vCPUs on the vm will be NumCPUs/NumCoresPerSocket.' + format: int32 + type: integer + cpus: + description: NumCPUs is the total number of virtual + processor cores to assign a vm. + format: int32 + type: integer + dataDisks: + description: DataDisks are additional disks to add + to the VM that are not part of the VM's OVA template. + items: + description: DataDisk defines a data disk to add + to the VM that is not part of the VM OVA template. + properties: + name: + description: 'name is used to identify the + disk definition. name is required needs + to be unique so that it can be used to + + clearly identify purpose of the disk.' + example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + provisioningMode: + description: 'provisioningMode is an optional + field that specifies the provisioning type + to be used by this vSphere data disk. + + Allowed values are "Thin", "Thick", "EagerlyZeroed", + and omitted. + + When set to Thin, the disk will be made + using thin provisioning allocating the bare + minimum space. + + When set to Thick, the full disk size will + be allocated when disk is created. + + When set to EagerlyZeroed, the disk will + be created using eager zero provisioning. + An eager zeroed thick disk has all space + allocated and wiped clean of any previous + contents on the physical media at creation + time. Such disks may take longer time during + creation compared to other disk formats. + + When omitted, no setting will be applied + to the data disk and the provisioning mode + for the disk will be determined by the default + storage policy configured for the datastore + in vSphere.' + enum: + - Thin + - Thick + - EagerlyZeroed + type: string + sizeGiB: + description: 'sizeGiB is the size of the disk + in GiB. + + The maximum supported size is 16384 GiB.' + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + memoryMB: + description: Memory is the size of a VM's memory + in MB. + format: int64 + type: integer + osDisk: + description: OSDisk defines the storage for instance. + properties: + diskSizeGB: + description: DiskSizeGB defines the size of + disk in GB. + format: int32 + type: integer + type: object + zones: + description: 'Zones defines available zones + + Zones is available in TechPreview.' + items: + type: string + type: array + type: object + diskType: + description: 'DiskType is the name of the disk provisioning + type, + + valid values are thin, thick, and eagerZeroedThick. + When not + + specified, it will be set according to the default + storage policy + + of vsphere.' + enum: + - '' + - thin + - thick + - eagerZeroedThick + type: string + failureDomains: + description: 'FailureDomains holds the VSpherePlatformFailureDomainSpec + which contains + + the definition of region, zone and the vCenter topology. + + If this is omitted failure domains (regions and zones) + will not be used.' 
+ items: + description: 'FailureDomain holds the region and zone + failure domain and + + the vCenter topology of that failure domain.' + properties: + name: + description: 'name defines the name of the FailureDomain + + This name is arbitrary but will be used + + in VSpherePlatformDeploymentZone for association.' + maxLength: 256 + minLength: 1 + type: string + region: + description: 'The region is the name of the tag + in vCenter that is associated with the + + tag category `openshift-region`. The region + name must match the tag name + + and must exist prior to installation. When the + regionType is Datacenter + + the tag must be attached to the toplogy.datacenter + object in vCenter. + + When the regionType is ComputeCluster the tag + must be attached to the topology.computeCluster + + object in vCenter.' + type: string + regionType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - Datacenter + - ComputeCluster + description: 'regionType is the type of failure + domain region, the current values are "Datacenter" + and "ComputeCluster" + + When regionType is Datacenter the zoneType must + be ComputeCluster. + + When regionType is ComputeCluster the zoneType + must be HostGroup' + type: string + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure + domain using vSphere constructs + properties: + computeCluster: + description: 'computeCluster as the failure + domain + + This is required to be a path' + maxLength: 2048 + minLength: 1 + type: string + datacenter: + description: 'datacenter is the vCenter datacenter + in which virtual machines will be located + + and defined as the failure domain.' + maxLength: 80 + minLength: 1 + type: string + datastore: + description: 'datastore is the name or inventory + path of the datastore in which the + + virtual machine is created/located.' + maxLength: 2048 + minLength: 1 + type: string + folder: + description: 'folder is the inventory path + of the folder in which the + + virtual machine is created/located.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + hostGroup: + description: 'hostGroup is the name of the + vm-host group of type host within vCenter + for this failure domain. + + hostGroup is limited to 80 characters. + + This field is required when the ZoneType + is HostGroup' + maxLength: 80 + type: string + networks: + description: networks is the list of networks + within this failure domain + items: + type: string + maxItems: 10 + minItems: 1 + type: array + resourcePool: + description: 'resourcePool is the absolute + path of the resource pool where virtual + machines will be + + created. The absolute path is of the form + //host//Resources/.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/host/.*?/Resources.* + type: string + tagIDs: + description: 'tagIDs is an optional set of + tags to add to an instance. Specified tagIDs + + must use URN-notation instead of display + names. A maximum of 10 tag IDs may be specified.' + example: urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL + items: + type: string + type: array + template: + description: 'template is the inventory path + of the virtual machine or template + + that will be used for cloning.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? 
+ type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: 'The zone is the name of the tag + in vCenter that is associated with + + the tag category `openshift-zone`. The zone + name must match the tag name + + and must exist prior to installation. When zoneType + is HostGroup the + + ESXi hosts defined in the provided in the topology.hostGroup + field must be tagged. + + When the zoneType is ComputeCluster the tag + must be attached to the topology.computeCluster + + object in vCenter.' + type: string + zoneType: + allOf: + - enum: + - HostGroup + - Datacenter + - ComputeCluster + - enum: + - ComputeCluster + - HostGroup + description: 'When zoneType is ComputeCluster + the regionType must be Datacenter + + When zoneType is HostGroup the regionType must + be ComputeCluster + + If the zoneType is HostGroup topology.hostGroup + must be defined and exist in vCenter + + prior to installation.' + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + folder: + description: 'Folder is the absolute path of the folder + that will be used and/or created for + + virtual machines. The absolute path is of the form + //vm//. + + Deprecated: Use FailureDomains.Topology.Folder' + pattern: ^/.*?/vm/.*? + type: string + hosts: + description: Hosts defines network configurations to + be applied by the installer. Hosts is available in + TechPreview. + items: + description: Host defines host VMs to generate as + part of the installation. + properties: + failureDomain: + description: FailureDomain refers to the name + of a FailureDomain as described in https://github.com/openshift/enhancements/blob/master/enhancements/installer/vsphere-ipi-zonal.md + type: string + networkDevice: + description: NetworkDeviceSpec to be applied to + the host + properties: + gateway: + description: 'gateway is an IPv4 or IPv6 address + which represents the subnet gateway, + + for example, 192.168.1.1.' + format: ipv6 + type: string + ipAddrs: + description: 'ipAddrs is a list of one or + more IPv4 and/or IPv6 addresses and CIDR + to assign to + + this device, for example, 192.168.1.100/24. + IP addresses provided via ipAddrs are + + intended to allow explicit assignment of + a machine''s IP address.' + example: 2001:DB8:0000:0000:244:17FF:FEB6:D37D/64 + format: ipv6 + items: + type: string + type: array + nameservers: + description: 'nameservers is a list of IPv4 + and/or IPv6 addresses used as DNS nameservers, + for example, + + 8.8.8.8. a nameserver is not provided by + a fulfilled IPAddressClaim. If DHCP is not + the + + source of IP addresses for this network + device, nameservers should include a valid + nameserver.' + example: 8.8.8.8 + format: ipv6 + items: + type: string + type: array + required: + - ipAddrs + type: object + role: + description: Role defines the role of the node + enum: + - '' + - bootstrap + - control-plane + - compute + type: string + required: + - networkDevice + - role + type: object + type: array + ingressVIP: + description: 'DeprecatedIngressVIP is the virtual IP + address for ingress + + Deprecated: Use IngressVIPs' + type: string + ingressVIPs: + description: 'IngressVIPs contains the VIP(s) for ingress. + In dual stack clusters it + + contains an IPv4 and IPv6 address, otherwise only + one VIP' + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + description: 'LoadBalancer defines how the load balancer + used by the cluster is configured. 
+ + LoadBalancer is available in TechPreview.' + properties: + type: + default: OpenShiftManagedDefault + description: 'type defines the type of load balancer + used by the cluster on VSphere platform + + which can be a user-managed or openshift-managed + load balancer + + that is to be used for the OpenShift API and Ingress + endpoints. + + When set to OpenShiftManagedDefault the static + pods in charge of API and Ingress traffic load-balancing + + defined in the machine config operator will be + deployed. + + When set to UserManaged these static pods will + not be deployed and it is expected that + + the load balancer is configured out of band by + the deployer. + + When omitted, this means no opinion and the platform + is left to choose a reasonable default. + + The default value is OpenShiftManagedDefault.' + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + network: + description: 'Network specifies the name of the network + to be used by the cluster. + + Deprecated: Use FailureDomains.Topology.Network' + type: string + nodeNetworking: + description: 'nodeNetworking contains the definition + of internal and external network constraints for + + assigning the node''s networking. + + If this field is omitted, networking defaults to the + legacy + + address selection behavior which is to only support + a single address and + + return the first one found.' + properties: + external: + description: external represents the network configuration + of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: 'excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + + the IP address from the VirtualMachine''s + VM for use in the status.addresses fields.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: 'network VirtualMachine''s VM Network + names that will be used to when searching + + for status.addresses fields. Note that if + internal.networkSubnetCIDR and + + external.networkSubnetCIDR are not set, then + the vNIC associated to this network must + + only have a single IP address assigned to + it. + + The available networks (port groups) can be + listed using + + `govc ls ''network/*''`' + type: string + networkSubnetCidr: + description: 'networkSubnetCidr IP address on + VirtualMachine''s network interfaces included + in the fields'' CIDRs + + that will be used in respective status.addresses + fields.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration + of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: 'excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + + the IP address from the VirtualMachine''s + VM for use in the status.addresses fields.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: 'network VirtualMachine''s VM Network + names that will be used to when searching + + for status.addresses fields. Note that if + internal.networkSubnetCIDR and + + external.networkSubnetCIDR are not set, then + the vNIC associated to this network must + + only have a single IP address assigned to + it. 
+ + The available networks (port groups) can be + listed using + + `govc ls ''network/*''`' + type: string + networkSubnetCidr: + description: 'networkSubnetCidr IP address on + VirtualMachine''s network interfaces included + in the fields'' CIDRs + + that will be used in respective status.addresses + fields.' + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + password: + description: 'Password is the password for the user + to use to connect to the vCenter. + + Deprecated: Use VCenters.Password' + type: string + resourcePool: + description: 'ResourcePool is the absolute path of the + resource pool where virtual machines will be + + created. The absolute path is of the form //host//Resources/. + + Deprecated: Use FailureDomains.Topology.ResourcePool' + type: string + username: + description: 'Username is the name of the user to use + to connect to the vCenter. + + Deprecated: Use VCenters.Username' + type: string + vCenter: + description: 'VCenter is the domain name or IP address + of the vCenter. + + Deprecated: Use VCenters.Server' + type: string + vcenters: + description: VCenters holds the connection details for + services to communicate with vCenter. + items: + description: 'VCenter stores the vCenter connection + fields + + https://github.com/kubernetes/cloud-provider-vsphere/blob/master/pkg/common/config/types_yaml.go' + properties: + datacenters: + description: Datacenter in which VMs are located. + items: + type: string + minItems: 1 + type: array + password: + description: Password is the password for the + user to use to connect to the vCenter. + type: string + port: + default: 443 + description: 'port is the TCP port that will be + used to communicate to + + the vCenter endpoint. This is typically unchanged + from + + the default of HTTPS TCP/443.' + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + maxLength: 255 + type: string + user: + description: Username is the username that will + be used to connect to vCenter + type: string + required: + - datacenters + - password + - server + - user + type: object + maxItems: 3 + minItems: 1 + type: array + type: object network: - description: Network specifies the name of the network to - be used by the cluster. + description: 'Network specifies the name of the network + to be used by the cluster. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string vCenter: - description: VCenter is the domain name or IP address of + description: 'VCenter is the domain name or IP address of the vCenter. + + Deprecated: Please use Platform.Infrastructure instead + + See also: Platform.ConvertDeprecatedFields' type: string required: - certificatesSecretRef - credentialsSecretRef - - datacenter - - defaultDatastore - - vCenter type: object type: object pullSecretRef: @@ -9427,6 +10878,70 @@ objects: cores to assign a vm. format: int32 type: integer + dataDisks: + description: DataDisks are additional disks to add to the + VM that are not part of the VM's OVA template. + items: + description: DataDisk defines a data disk to add to the + VM that is not part of the VM OVA template. + properties: + name: + description: 'name is used to identify the disk definition. + name is required needs to be unique so that it can + be used to + + clearly identify purpose of the disk.' 
+ example: images_1 + maxLength: 80 + pattern: ^[a-zA-Z0-9]([-_a-zA-Z0-9]*[a-zA-Z0-9])?$ + type: string + provisioningMode: + description: 'provisioningMode is an optional field + that specifies the provisioning type to be used + by this vSphere data disk. + + Allowed values are "Thin", "Thick", "EagerlyZeroed", + and omitted. + + When set to Thin, the disk will be made using thin + provisioning allocating the bare minimum space. + + When set to Thick, the full disk size will be allocated + when disk is created. + + When set to EagerlyZeroed, the disk will be created + using eager zero provisioning. An eager zeroed thick + disk has all space allocated and wiped clean of + any previous contents on the physical media at creation + time. Such disks may take longer time during creation + compared to other disk formats. + + When omitted, no setting will be applied to the + data disk and the provisioning mode for the disk + will be determined by the default storage policy + configured for the datastore in vSphere.' + enum: + - Thin + - Thick + - EagerlyZeroed + type: string + sizeGiB: + description: 'sizeGiB is the size of the disk in GiB. + + The maximum supported size is 16384 GiB.' + format: int32 + maximum: 16384 + minimum: 1 + type: integer + required: + - name + - sizeGiB + type: object + maxItems: 29 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map memoryMB: description: Memory is the size of a VM's memory in MB. format: int64 @@ -9439,27 +10954,120 @@ objects: GB. format: int32 type: integer - required: - - diskSizeGB type: object resourcePool: description: 'ResourcePool is the name of the resource pool that will be used for virtual machines. - If it is not present, a default value will be used.' + If it is not present, a default value will be used. + + Deprecated: use Topology instead' type: string tagIDs: - description: TagIDs is a list of up to 10 tags to add to + description: 'TagIDs is a list of up to 10 tags to add to the VMs that this machine set provisions in vSphere. + + Deprecated: use Topology instead' items: type: string maxItems: 10 type: array - required: - - coresPerSocket - - cpus - - memoryMB - - osDisk + topology: + description: 'Topology is the vSphere topology that will + be used for virtual machines. + + If it is not present, a default value will be used.' + properties: + computeCluster: + description: 'computeCluster as the failure domain + + This is required to be a path' + maxLength: 2048 + minLength: 1 + type: string + datacenter: + description: 'datacenter is the vCenter datacenter in + which virtual machines will be located + + and defined as the failure domain.' + maxLength: 80 + minLength: 1 + type: string + datastore: + description: 'datastore is the name or inventory path + of the datastore in which the + + virtual machine is created/located.' + maxLength: 2048 + minLength: 1 + type: string + folder: + description: 'folder is the inventory path of the folder + in which the + + virtual machine is created/located.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + hostGroup: + description: 'hostGroup is the name of the vm-host group + of type host within vCenter for this failure domain. + + hostGroup is limited to 80 characters. 
+ + This field is required when the ZoneType is HostGroup' + maxLength: 80 + type: string + networks: + description: networks is the list of networks within + this failure domain + items: + type: string + maxItems: 10 + minItems: 1 + type: array + resourcePool: + description: 'resourcePool is the absolute path of the + resource pool where virtual machines will be + + created. The absolute path is of the form //host//Resources/.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/host/.*?/Resources.* + type: string + tagIDs: + description: 'tagIDs is an optional set of tags to add + to an instance. Specified tagIDs + + must use URN-notation instead of display names. A + maximum of 10 tag IDs may be specified.' + example: urn:vmomi:InventoryServiceTag:5736bf56-49f5-4667-b38c-b97e09dc9578:GLOBAL + items: + type: string + type: array + template: + description: 'template is the inventory path of the + virtual machine or template + + that will be used for cloning.' + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zones: + description: 'Zones defines available zones + + Zones is available in TechPreview.' + items: + type: string + type: array type: object type: object replicas: diff --git a/hack/e2e-common.sh b/hack/e2e-common.sh index 9187ef14782..45e76875449 100755 --- a/hack/e2e-common.sh +++ b/hack/e2e-common.sh @@ -369,4 +369,4 @@ function capture_cluster_logs() { ${SRC_ROOT}/hack/logextractor.sh ${CLUSTER_NAME} "${ARTIFACT_DIR}/hive" exit 1 fi -} \ No newline at end of file +} diff --git a/pkg/clusterresource/builder_test.go b/pkg/clusterresource/builder_test.go index 11a72076253..8726060ca7e 100644 --- a/pkg/clusterresource/builder_test.go +++ b/pkg/clusterresource/builder_test.go @@ -9,6 +9,7 @@ import ( hivev1 "github.com/openshift/hive/apis/hive/v1" hivev1azure "github.com/openshift/hive/apis/hive/v1/azure" "github.com/openshift/installer/pkg/types/nutanix" + "github.com/openshift/installer/pkg/types/vsphere" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" @@ -142,16 +143,37 @@ func createOpenStackClusterBuilder() *Builder { func createVSphereClusterBuilder() *Builder { b := createTestBuilder() b.CloudBuilder = &VSphereCloudBuilder{ - VCenter: "test", - Username: "test", - Password: "test", - Datacenter: "test", - DefaultDatastore: "test", - Folder: "test", - Cluster: "test", - APIVIP: "192.168.0.2", - IngressVIP: "192.168.0.3", - CACert: []byte{}, + Username: "test", + Password: "test", + CACert: []byte{}, + Infrastructure: &vsphere.Platform{ + VCenters: []vsphere.VCenter{ + { + Server: "test", + Port: 123, + Datacenters: []string{"test"}, + }, + }, + FailureDomains: []vsphere.FailureDomain{ + { + Name: "test", + Region: "test", + Zone: "test", + Server: "test", + Topology: vsphere.Topology{ + Datacenter: "test", + ComputeCluster: "test", + Networks: []string{"test"}, + Datastore: "test", + ResourcePool: "test", + Folder: "test", + Template: "test", + }, + }, + }, + APIVIPs: []string{"192.168.0.2"}, + IngressVIPs: []string{"192.168.0.3"}, + }, } return b } diff --git a/pkg/clusterresource/vsphere.go b/pkg/clusterresource/vsphere.go index bf592452c6c..4cd9d160030 100644 --- a/pkg/clusterresource/vsphere.go +++ b/pkg/clusterresource/vsphere.go @@ -19,39 +19,17 @@ var _ CloudBuilder = (*VSphereCloudBuilder)(nil) // VSphereCloudBuilder encapsulates cluster artifact generation logic specific to vSphere. 
type VSphereCloudBuilder struct { - // VCenter is the domain name or IP address of the vCenter. - VCenter string - // Username is the name of the user to use to connect to the vCenter. Username string // Password is the password for the user to use to connect to the vCenter. Password string - // Datacenter is the name of the datacenter to use in the vCenter. - Datacenter string - - // DefaultDatastore is the default datastore to use for provisioning volumes. - DefaultDatastore string - - // Folder is the name of the folder that will be used and/or created for - // virtual machines. - Folder string - - // Cluster is the name of the cluster virtual machines will be cloned into. - Cluster string - - // APIVIP is the virtual IP address for the api endpoint - APIVIP string - - // IngressVIP is the virtual IP address for ingress - IngressVIP string - - // Network specifies the name of the network to be used by the cluster. - Network string - // CACert is the CA certificate(s) used to communicate with the vCenter. CACert []byte + + // Infrastructure is the full vSphere platform spec + Infrastructure *installervsphere.Platform } func NewVSphereCloudBuilderFromSecret(credsSecret, certsSecret *corev1.Secret) *VSphereCloudBuilder { @@ -111,43 +89,27 @@ func (p *VSphereCloudBuilder) GetCloudPlatform(o *Builder) hivev1.Platform { CertificatesSecretRef: corev1.LocalObjectReference{ Name: p.certificatesSecretName(o), }, - VCenter: p.VCenter, - Datacenter: p.Datacenter, - DefaultDatastore: p.DefaultDatastore, - Folder: p.Folder, - Cluster: p.Cluster, - Network: p.Network, + Infrastructure: p.Infrastructure, }, } } func (p *VSphereCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.MachinePool) { mp.Spec.Platform.VSphere = &hivev1vsphere.MachinePool{ - NumCPUs: 2, - NumCoresPerSocket: 1, - MemoryMiB: 8192, - OSDisk: hivev1vsphere.OSDisk{ - DiskSizeGB: 120, + MachinePool: installervsphere.MachinePool{ + NumCPUs: 2, + NumCoresPerSocket: 1, + MemoryMiB: 8192, + OSDisk: installervsphere.OSDisk{ + DiskSizeGB: 120, + }, }, } } func (p *VSphereCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) { - - // TODO: Watch for removal of deprecated fields https://issues.redhat.com/browse/SPLAT-1093 ic.Platform = installertypes.Platform{ - VSphere: &installervsphere.Platform{ - DeprecatedVCenter: p.VCenter, - DeprecatedUsername: p.Username, - DeprecatedPassword: p.Password, - DeprecatedDatacenter: p.Datacenter, - DeprecatedDefaultDatastore: p.DefaultDatastore, - DeprecatedFolder: p.Folder, - DeprecatedCluster: p.Cluster, - APIVIPs: []string{p.APIVIP}, - IngressVIPs: []string{p.IngressVIP}, - DeprecatedNetwork: p.Network, - }, + VSphere: p.Infrastructure, } } diff --git a/pkg/controller/clusterdeployment/clusterdeployment_controller.go b/pkg/controller/clusterdeployment/clusterdeployment_controller.go index 70773643837..df5db48ab4b 100644 --- a/pkg/controller/clusterdeployment/clusterdeployment_controller.go +++ b/pkg/controller/clusterdeployment/clusterdeployment_controller.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/bombsimon/logrusr/v4" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -431,6 +432,16 @@ func (r *ReconcileClusterDeployment) Reconcile(ctx context.Context, request reco return reconcile.Result{}, nil } + // HIVE-2391: remove this once we fully deprecate the old vSphere method (4.12 sunset) + if getClusterPlatform(cd) == constants.PlatformVSphere { + if cd.Spec.Platform.VSphere.Infrastructure == nil { + r.logger.WithField("gvk", 
cd.GroupVersionKind().String()).WithField("name", cd.Name).WithField("namespace", cd.Namespace).Info("Updating deprecated vSphere fields on ClusterDeployment object") + cd = cd.DeepCopy() + cd.Spec.Platform.VSphere.ConvertDeprecatedFields(logrusr.New(r.logger)) + return reconcile.Result{}, r.Update(ctx, cd) + } + } + return r.reconcile(request, cd, cdLog) } @@ -2030,10 +2041,14 @@ func generateDeprovision(cd *hivev1.ClusterDeployment) (*hivev1.ClusterDeprovisi CertificatesSecretRef: cd.Spec.Platform.OpenStack.CertificatesSecretRef, } case cd.Spec.Platform.VSphere != nil: + vcenters := make([]string, 0, len(cd.Spec.Platform.VSphere.Infrastructure.VCenters)) + for _, vcenter := range cd.Spec.Platform.VSphere.Infrastructure.VCenters { + vcenters = append(vcenters, vcenter.Server) + } req.Spec.Platform.VSphere = &hivev1.VSphereClusterDeprovision{ CredentialsSecretRef: cd.Spec.Platform.VSphere.CredentialsSecretRef, CertificatesSecretRef: cd.Spec.Platform.VSphere.CertificatesSecretRef, - VCenter: cd.Spec.Platform.VSphere.VCenter, + VCenters: vcenters, } case cd.Spec.Platform.IBMCloud != nil: req.Spec.Platform.IBMCloud = &hivev1.IBMClusterDeprovision{ diff --git a/pkg/controller/clusterdeployment/installconfigvalidation.go b/pkg/controller/clusterdeployment/installconfigvalidation.go index 83e0f3803b0..43af4d96eac 100644 --- a/pkg/controller/clusterdeployment/installconfigvalidation.go +++ b/pkg/controller/clusterdeployment/installconfigvalidation.go @@ -63,9 +63,15 @@ func ValidateInstallConfig(cd *hivev1.ClusterDeployment, installConfigSecret *co if ic.Platform.VSphere == nil { return ic, errors.New(novSpherePlatformErr) } - if (ic.Platform.VSphere.DeprecatedUsername == "" || ic.Platform.VSphere.DeprecatedPassword == "") && - (len(ic.Platform.VSphere.VCenters) == 0 || ic.Platform.VSphere.VCenters[0].Username == "" || - ic.Platform.VSphere.VCenters[0].Password == "") { + + hasCreds := ic.Platform.VSphere.DeprecatedUsername != "" || ic.Platform.VSphere.DeprecatedPassword != "" + for _, vcenter := range ic.Platform.VSphere.VCenters { + if vcenter.Username != "" && vcenter.Password != "" { + hasCreds = true + } + } + + if !hasCreds { return ic, errors.New(missingvSphereCredentialsErr) } case platform.Nutanix != nil: diff --git a/pkg/controller/clusterdeployment/installconfigvalidation_test.go b/pkg/controller/clusterdeployment/installconfigvalidation_test.go index 537e57b7579..83ed3a31bb3 100644 --- a/pkg/controller/clusterdeployment/installconfigvalidation_test.go +++ b/pkg/controller/clusterdeployment/installconfigvalidation_test.go @@ -1,7 +1,6 @@ package clusterdeployment import ( - hivev1nutanix "github.com/openshift/hive/apis/hive/v1/nutanix" "testing" "github.com/stretchr/testify/assert" @@ -12,8 +11,11 @@ import ( hivev1aws "github.com/openshift/hive/apis/hive/v1/aws" hivev1azure "github.com/openshift/hive/apis/hive/v1/azure" hivev1gcp "github.com/openshift/hive/apis/hive/v1/gcp" + hivev1nutanix "github.com/openshift/hive/apis/hive/v1/nutanix" hivev1vpshere "github.com/openshift/hive/apis/hive/v1/vsphere" testcd "github.com/openshift/hive/pkg/test/clusterdeployment" + installervsphere "github.com/openshift/installer/pkg/types/vsphere" + "github.com/openshift/hive/pkg/util/scheme" ) @@ -264,9 +266,7 @@ func TestInstallConfigValidation(t *testing.T) { name: "test install config no vSphere platform", cd: cdBuilder.Build( func(cd *hivev1.ClusterDeployment) { - cd.Spec.Platform.VSphere = &hivev1vpshere.Platform{ - VCenter: "10.0.0.1", - } + cd.Spec.Platform.VSphere = &hivev1vpshere.Platform{} }, 
), ic: testAWSIC, @@ -277,7 +277,7 @@ func TestInstallConfigValidation(t *testing.T) { cd: cdBuilder.Build( func(cd *hivev1.ClusterDeployment) { cd.Spec.Platform.VSphere = &hivev1vpshere.Platform{ - VCenter: "10.0.0.1", + Infrastructure: &installervsphere.Platform{}, } }, ), diff --git a/pkg/controller/clusterpool/clusterpool_controller.go b/pkg/controller/clusterpool/clusterpool_controller.go index f493118398d..1e1f2b2a770 100644 --- a/pkg/controller/clusterpool/clusterpool_controller.go +++ b/pkg/controller/clusterpool/clusterpool_controller.go @@ -9,6 +9,7 @@ import ( "sort" "strings" + "github.com/bombsimon/logrusr/v4" "github.com/davegardnerisme/deephash" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -318,6 +319,16 @@ func (r *ReconcileClusterPool) Reconcile(ctx context.Context, request reconcile. return reconcile.Result{}, nil } + // HIVE-2391: remove this once we fully deprecate the old vSphere method (4.12 sunset) + if clp.Spec.Platform.VSphere != nil { + if clp.Spec.Platform.VSphere.Infrastructure == nil { + r.logger.WithField("gvk", clp.GroupVersionKind().String()).WithField("name", clp.Name).WithField("namespace", clp.Namespace).Info("Updating deprecated vSphere fields on ClusterPool object") + clp = clp.DeepCopy() + clp.Spec.Platform.VSphere.ConvertDeprecatedFields(logrusr.New(r.logger)) + return reconcile.Result{}, r.Update(ctx, clp) + } + } + // If the pool is deleted, clear finalizer once all ClusterDeployments have been deleted. if clp.DeletionTimestamp != nil { return reconcile.Result{}, r.reconcileDeletedPool(clp, logger) @@ -1311,6 +1322,9 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg cloudBuilder.Cloud = platform.OpenStack.Cloud return cloudBuilder, nil case platform.VSphere != nil: + if platform.VSphere.Infrastructure == nil { + return nil, errors.New("VSphere ClusterPool with deprecated fields has not been updated by ClusterPool controller yet, requeueing...") + } credsSecret, err := r.getCredentialsSecret(pool, platform.VSphere.CredentialsSecretRef.Name, logger) if err != nil { return nil, err @@ -1326,12 +1340,7 @@ func (r *ReconcileClusterPool) createCloudBuilder(pool *hivev1.ClusterPool, logg } cloudBuilder := clusterresource.NewVSphereCloudBuilderFromSecret(credsSecret, certsSecret) - cloudBuilder.Datacenter = platform.VSphere.Datacenter - cloudBuilder.DefaultDatastore = platform.VSphere.DefaultDatastore - cloudBuilder.VCenter = platform.VSphere.VCenter - cloudBuilder.Cluster = platform.VSphere.Cluster - cloudBuilder.Folder = platform.VSphere.Folder - cloudBuilder.Network = platform.VSphere.Network + cloudBuilder.Infrastructure = platform.VSphere.Infrastructure return cloudBuilder, nil default: diff --git a/pkg/controller/machinepool/vsphereactuator.go b/pkg/controller/machinepool/vsphereactuator.go index 7a7866c5c57..837e84f7eca 100644 --- a/pkg/controller/machinepool/vsphereactuator.go +++ b/pkg/controller/machinepool/vsphereactuator.go @@ -2,7 +2,6 @@ package machinepool import ( "fmt" - "strings" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -10,9 +9,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" machineapi "github.com/openshift/api/machine/v1beta1" - installvsphere "github.com/openshift/installer/pkg/asset/machines/vsphere" + installvspheremachines "github.com/openshift/installer/pkg/asset/machines/vsphere" installertypes "github.com/openshift/installer/pkg/types" - installertypesvsphere "github.com/openshift/installer/pkg/types/vsphere" + installvsphere 
"github.com/openshift/installer/pkg/types/vsphere" vsphereutil "github.com/openshift/machine-api-operator/pkg/controller/vsphere" hivev1 "github.com/openshift/hive/apis/hive/v1" @@ -50,58 +49,44 @@ func (a *VSphereActuator) GenerateMachineSets(cd *hivev1.ClusterDeployment, pool if cd.Spec.Platform.VSphere == nil { return nil, false, errors.New("ClusterDeployment is not for VSphere") } + if cd.Spec.Platform.VSphere.Infrastructure == nil { + return nil, false, errors.New("VSphere CD with deprecated fields has not been updated by CD controller yet, requeueing...") + } if pool.Spec.Platform.VSphere == nil { return nil, false, errors.New("MachinePool is not for VSphere") } computePool := baseMachinePool(pool) - computePool.Platform.VSphere = &installertypesvsphere.MachinePool{ - NumCPUs: pool.Spec.Platform.VSphere.NumCPUs, - NumCoresPerSocket: pool.Spec.Platform.VSphere.NumCoresPerSocket, - MemoryMiB: pool.Spec.Platform.VSphere.MemoryMiB, - OSDisk: installertypesvsphere.OSDisk{ - DiskSizeGB: pool.Spec.Platform.VSphere.DiskSizeGB, - }, - } + computePool.Platform.VSphere = &pool.Spec.Platform.VSphere.MachinePool - // Fake an install config as we do with other actuators. We only populate what we know is needed today. - // WARNING: changes to use more of installconfig in the MachineSets function can break here. Hopefully - // will be caught by unit tests. + // Fake an install config as we do with other actuators. ic := &installertypes.InstallConfig{ Platform: installertypes.Platform{ - VSphere: &installertypesvsphere.Platform{ - VCenters: []installertypesvsphere.VCenter{ - { - Server: cd.Spec.Platform.VSphere.VCenter, - Port: 443, - Username: "", - Password: "", - Datacenters: []string{cd.Spec.Platform.VSphere.Datacenter}, - }, - }, - FailureDomains: []installertypesvsphere.FailureDomain{ - { - Name: "generated-failure-domain", - Region: "generated-region", - Zone: "generated-zone", - Server: cd.Spec.Platform.VSphere.VCenter, - Topology: installertypesvsphere.Topology{ - Datacenter: cd.Spec.Platform.VSphere.Datacenter, - Datastore: setDatastorePath(cd.Spec.Platform.VSphere.DefaultDatastore, cd.Spec.Platform.VSphere.Datacenter, logger), - Folder: setFolderPath(cd.Spec.Platform.VSphere.Folder, cd.Spec.Platform.VSphere.Datacenter, logger), - ComputeCluster: setComputeClusterPath(cd.Spec.Platform.VSphere.Cluster, cd.Spec.Platform.VSphere.Datacenter, logger), - Networks: []string{cd.Spec.Platform.VSphere.Network}, - Template: a.osImage, - ResourcePool: pool.Spec.Platform.VSphere.ResourcePool, - TagIDs: pool.Spec.Platform.VSphere.TagIDs, - }, - }, - }, - }, + VSphere: cd.Spec.Platform.VSphere.Infrastructure, }, } + for i := range ic.VSphere.FailureDomains { + failureDomain := &ic.VSphere.FailureDomains[i] // because go ranges by copy, not by reference + if a.osImage != "" { + failureDomain.Topology.Template = a.osImage + } + if pool.Spec.Platform.VSphere.Topology != nil { + newTopo, err := applyTopologyTemplate(failureDomain.Topology, *pool.Spec.Platform.VSphere.Topology, a.logger) + if err != nil { + return nil, false, err + } + + failureDomain.Topology = newTopo + } + if pool.Spec.Platform.VSphere.DeprecatedResourcePool != "" { + failureDomain.Topology.ResourcePool = pool.Spec.Platform.VSphere.DeprecatedResourcePool + } + if len(pool.Spec.Platform.VSphere.DeprecatedTagIDs) > 0 { + failureDomain.Topology.TagIDs = pool.Spec.Platform.VSphere.DeprecatedTagIDs + } + } - installerMachineSets, err := installvsphere.MachineSets( + installerMachineSets, err := installvspheremachines.MachineSets( 
cd.Spec.ClusterMetadata.InfraID, ic, computePool, @@ -127,28 +112,48 @@ func getVSphereOSImage(masterMachine *machineapi.Machine, scheme *runtime.Scheme return osImage, nil } -// Copied from https://github.com/openshift/installer/blob/f7731922a0f17a8339a3e837f72898ac77643611/pkg/types/vsphere/conversion/installconfig.go#L75-L97 +func applyTopologyTemplate(base installvsphere.Topology, template installvsphere.Topology, logger log.FieldLogger) (out installvsphere.Topology, err error) { + var ubase map[string]interface{} + var utemplate map[string]interface{} -func setComputeClusterPath(cluster, datacenter string, logger log.FieldLogger) string { - if cluster != "" && !strings.HasPrefix(cluster, "/") { - logger.Warn("computeCluster as a non-path is now depreciated please use the form: /%s/host/%s", datacenter, cluster) - return fmt.Sprintf("/%s/host/%s", datacenter, cluster) + ubase, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&base) + if err != nil { + return } - return cluster -} -func setDatastorePath(datastore, datacenter string, logger log.FieldLogger) string { - if datastore != "" && !strings.HasPrefix(datastore, "/") { - logger.Warn("datastore as a non-path is now depreciated please use the form: /%s/datastore/%s", datacenter, datastore) - return fmt.Sprintf("/%s/datastore/%s", datacenter, datastore) + utemplate, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&template) + if err != nil { + return } - return datastore -} -func setFolderPath(folder, datacenter string, logger log.FieldLogger) string { - if folder != "" && !strings.HasPrefix(folder, "/") { - logger.Warn("folder as a non-path is now depreciated please use the form: /%s/vm/%s", datacenter, folder) - return fmt.Sprintf("/%s/vm/%s", datacenter, folder) + for k, i := range utemplate { + switch v := i.(type) { + case string: + if v != "" { + ubase[k] = v + } + case []interface{}: + if len(v) == 0 { + continue + } + switch v[0].(type) { + case string: + ubase[k] = v + default: + logger. + WithField("field-name", k). + WithField("field-value", v). + WithField("field-type", fmt.Sprintf("%T", v)). + Warn("unexpected value on vsphere machinepool topology, please report this to the Hive maintainers") + } + default: + logger. + WithField("field-name", k). + WithField("field-value", v). + WithField("field-type", fmt.Sprintf("%T", v)).
+ Warn("unexpected value on vsphere machinepool topology, please report this to the Hive maintainers") + } } - return folder + + err = runtime.DefaultUnstructuredConverter.FromUnstructured(ubase, &out) + return } diff --git a/pkg/controller/machinepool/vsphereactuator_test.go b/pkg/controller/machinepool/vsphereactuator_test.go index 8828fb8efb0..3c7d41bb89d 100644 --- a/pkg/controller/machinepool/vsphereactuator_test.go +++ b/pkg/controller/machinepool/vsphereactuator_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/openshift/installer/pkg/types/vsphere" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -28,14 +29,21 @@ func TestVSphereActuator(t *testing.T) { expectedMachineSetReplicas map[string]int64 expectedErr bool }{ + { + name: "deprecated vsphere fields", + clusterDeployment: testDeprecatedVSphereClusterDeployment(), + pool: testVSpherePool(), + masterMachine: testVSphereMachine("master0", "master"), + expectedErr: true, + }, { name: "generate machineset", clusterDeployment: testVSphereClusterDeployment(), pool: testVSpherePool(), + masterMachine: testVSphereMachine("master0", "master"), expectedMachineSetReplicas: map[string]int64{ fmt.Sprintf("%s-worker-0", testInfraID): 3, }, - masterMachine: testVSphereMachine("master0", "master"), }, } @@ -73,8 +81,8 @@ func validateVSphereMachineSets(t *testing.T, mSets []*machineapi.MachineSet, ex assert.Equal(t, int32(4), vsphereProvider.NumCPUs, "unexpected NumCPUs") assert.Equal(t, int32(4), vsphereProvider.NumCoresPerSocket, "unexpected NumCoresPerSocket") assert.Equal(t, int32(512), vsphereProvider.DiskGiB, "unexpected DiskGiB") - assert.Equal(t, "/vsphere-datacenter/vm/vsphere-folder", vsphereProvider.Workspace.Folder, "unexpected Folder") - assert.Equal(t, "/vsphere-datacenter/host/vsphere-cluster/Resources/vsphere-pool", vsphereProvider.Workspace.ResourcePool, "unexpected ResourcePool") + assert.Equal(t, "good-folder", vsphereProvider.Workspace.Folder, "unexpected Folder") + assert.Equal(t, "good-pool", vsphereProvider.Workspace.ResourcePool, "unexpected ResourcePool") if assert.Len(t, vsphereProvider.TagIDs, 1, "missing tag IDs") { assert.Equal(t, vsphereProvider.TagIDs[0], "vsphere-tag") } @@ -86,19 +94,37 @@ func testVSpherePool() *hivev1.MachinePool { p := testMachinePool() p.Spec.Platform = hivev1.MachinePoolPlatform{ VSphere: &hivev1vsphere.MachinePool{ - ResourcePool: "/vsphere-datacenter/host/vsphere-cluster/Resources/vsphere-pool", - MemoryMiB: 32 * 1024, - NumCPUs: 4, - NumCoresPerSocket: 4, - OSDisk: hivev1vsphere.OSDisk{ - DiskSizeGB: 512, + MachinePool: vsphere.MachinePool{ + MemoryMiB: 32 * 1024, + NumCPUs: 4, + NumCoresPerSocket: 4, + OSDisk: hivev1vsphere.OSDisk{ + DiskSizeGB: 512, + }, + }, + Topology: &vsphere.Topology{ + ResourcePool: "good-pool", + Folder: "good-folder", + TagIDs: []string{"vsphere-tag"}, }, - TagIDs: []string{"vsphere-tag"}, }, } return p } +func testDeprecatedVSphereClusterDeployment() *hivev1.ClusterDeployment { + cd := testClusterDeployment() + cd.Spec.Platform = hivev1.Platform{ + VSphere: &hivev1vsphere.Platform{ + CredentialsSecretRef: corev1.LocalObjectReference{ + Name: "vsphere-credentials", + }, + DeprecatedFolder: "/vsphere-datacenter/vm/vsphere-folder", + }, + } + return cd +} + func testVSphereClusterDeployment() *hivev1.ClusterDeployment { cd := testClusterDeployment() cd.Spec.Platform = hivev1.Platform{ @@ -106,7 +132,22 @@ func testVSphereClusterDeployment() *hivev1.ClusterDeployment { 
CredentialsSecretRef: corev1.LocalObjectReference{ Name: "vsphere-credentials", }, - Folder: "/vsphere-datacenter/vm/vsphere-folder", + Infrastructure: &vsphere.Platform{ + VCenters: []vsphere.VCenter{ + { + Server: "test-server", + }, + }, + FailureDomains: []vsphere.FailureDomain{ + { + Server: "test-server", + Topology: vsphere.Topology{ + ResourcePool: "default-pool", + Folder: "default-folder", + }, + }, + }, + }, }, } return cd diff --git a/pkg/controller/utils/credentials.go b/pkg/controller/utils/credentials.go index 60b5f11ee09..2fd4759227e 100644 --- a/pkg/controller/utils/credentials.go +++ b/pkg/controller/utils/credentials.go @@ -60,11 +60,17 @@ func ValidateCredentialsForClusterDeployment(kubeClient client.Client, cd *hivev } - return validateVSphereCredentials(cd.Spec.Platform.VSphere.VCenter, - string(secret.Data[constants.UsernameSecretKey]), - string(secret.Data[constants.PasswordSecretKey]), - rootCAFiles, - logger) + for _, vcenter := range cd.Spec.Platform.VSphere.Infrastructure.VCenters { + valid, err := validateVSphereCredentials(vcenter.Server, + string(secret.Data[constants.UsernameSecretKey]), + string(secret.Data[constants.PasswordSecretKey]), + rootCAFiles, + logger) + if err != nil || !valid { + return false, err + } + } + return true, nil default: // If we have no platform-specific credentials verification // assume the creds are valid. diff --git a/pkg/install/generate.go b/pkg/install/generate.go index 1b627d3c565..4c533df7d1a 100644 --- a/pkg/install/generate.go +++ b/pkg/install/generate.go @@ -826,6 +826,14 @@ func completeVSphereDeprovisionJob(req *hivev1.ClusterDeprovision, job *batchv1. req.Namespace, "vsphere-creds", constants.VSphereCredentialsDir, req.Spec.Platform.VSphere.CredentialsSecretRef.Name, "vsphere-certificates", constants.VSphereCertificatesDir, req.Spec.Platform.VSphere.CertificatesSecretRef.Name) + + var joinedVCenters string + if len(req.Spec.Platform.VSphere.VCenters) == 0 && req.Spec.Platform.VSphere.VCenter != "" { + joinedVCenters = req.Spec.Platform.VSphere.VCenter + } else { + joinedVCenters = strings.Join(req.Spec.Platform.VSphere.VCenters, "::") + } + job.Spec.Template.Spec.Containers = []corev1.Container{ { Name: "deprovision", @@ -835,7 +843,7 @@ func completeVSphereDeprovisionJob(req *hivev1.ClusterDeprovision, job *batchv1.
Command: []string{"/usr/bin/hiveutil"}, Args: []string{ "deprovision", "vsphere", - "--vsphere-vcenter", req.Spec.Platform.VSphere.VCenter, + "--vsphere-vcenter", joinedVCenters, "--loglevel", "debug", "--creds-dir", constants.VSphereCredentialsDir, req.Spec.InfraID, diff --git a/pkg/installmanager/installmanager.go b/pkg/installmanager/installmanager.go index 267ce1143c6..835e1bd2120 100644 --- a/pkg/installmanager/installmanager.go +++ b/pkg/installmanager/installmanager.go @@ -745,13 +745,20 @@ func cleanupFailedProvision(dynClient client.Client, cd *hivev1.ClusterDeploymen if vSpherePassword == "" { return fmt.Errorf("no %s env var set, cannot proceed", constants.VSpherePasswordEnvVar) } + + vcenters := make([]installertypesvsphere.VCenters, 0, len(cd.Spec.Platform.VSphere.Infrastructure.VCenters)) + for _, vcenter := range cd.Spec.Platform.VSphere.Infrastructure.VCenters { + vcenters = append(vcenters, installertypesvsphere.VCenters{ + VCenter: vcenter.Server, + Username: vcenter.Username, + Password: vcenter.Password, + }) + } metadata := &installertypes.ClusterMetadata{ InfraID: infraID, ClusterPlatformMetadata: installertypes.ClusterPlatformMetadata{ VSphere: &installertypesvsphere.Metadata{ - VCenter: cd.Spec.Platform.VSphere.VCenter, - Username: vSphereUsername, - Password: vSpherePassword, + VCenters: vcenters, }, }, } diff --git a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go index e8c2597ea9c..32f594fb50b 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook.go @@ -2,6 +2,7 @@ package v1 import ( "fmt" + "github.com/bombsimon/logrusr/v4" "net/http" "regexp" "strconv" @@ -306,7 +307,7 @@ func (a *ClusterDeploymentValidatingAdmissionHook) validateCreate(admissionSpec } } - allErrs = append(allErrs, validateClusterPlatform(specPath.Child("platform"), cd.Spec.Platform)...) + allErrs = append(allErrs, validateClusterPlatform(specPath.Child("platform"), cd.Spec.Platform, contextLogger)...) allErrs = append(allErrs, validateCanManageDNSForClusterPlatform(specPath, cd.Spec)...) 
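As an illustration of the `--vsphere-vcenter` convention introduced in the `completeVSphereDeprovisionJob` hunk above: multiple vCenter hostnames travel as a single flag value joined with `::`. A minimal sketch of that round trip, assuming a consumer splits on the same separator (the `splitVCenters` helper below is hypothetical, not code from this patch):

```go
package main

import (
	"fmt"
	"strings"
)

// splitVCenters is a hypothetical inverse of the strings.Join(vcenters, "::")
// used when the deprovision job's arguments are assembled.
func splitVCenters(flagValue string) []string {
	if flagValue == "" {
		return nil
	}
	return strings.Split(flagValue, "::")
}

func main() {
	joined := strings.Join([]string{"vc1.example.com", "vc2.example.com"}, "::")
	fmt.Println(joined)                // vc1.example.com::vc2.example.com
	fmt.Println(splitVCenters(joined)) // [vc1.example.com vc2.example.com]
}
```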
if cd.Spec.Platform.AWS != nil { @@ -458,7 +459,7 @@ func validatefeatureGates(decoder admission.Decoder, admissionSpec *admissionv1b return nil } -func validateClusterPlatform(path *field.Path, platform hivev1.Platform) field.ErrorList { +func validateClusterPlatform(path *field.Path, platform hivev1.Platform, entry *log.Entry) field.ErrorList { allErrs := field.ErrorList{} numberOfPlatforms := 0 if aws := platform.AWS; aws != nil { @@ -511,6 +512,8 @@ func validateClusterPlatform(path *field.Path, platform hivev1.Platform) field.E } } if vsphere := platform.VSphere; vsphere != nil { + vsphere = vsphere.DeepCopy() + vsphere.ConvertDeprecatedFields(logrusr.New(entry)) numberOfPlatforms++ vspherePath := path.Child("vsphere") if vsphere.CredentialsSecretRef.Name == "" { @@ -519,14 +522,8 @@ func validateClusterPlatform(path *field.Path, platform hivev1.Platform) field.E if vsphere.CertificatesSecretRef.Name == "" { allErrs = append(allErrs, field.Required(vspherePath.Child("certificatesSecretRef", "name"), "must specify certificates for vSphere access")) } - if vsphere.VCenter == "" { - allErrs = append(allErrs, field.Required(vspherePath.Child("vCenter"), "must specify vSphere vCenter")) - } - if vsphere.Datacenter == "" { - allErrs = append(allErrs, field.Required(vspherePath.Child("datacenter"), "must specify vSphere datacenter")) - } - if vsphere.DefaultDatastore == "" { - allErrs = append(allErrs, field.Required(vspherePath.Child("defaultDatastore"), "must specify vSphere defaultDatastore")) + if len(vsphere.Infrastructure.VCenters) == 0 { + allErrs = append(allErrs, field.Required(vspherePath.Child("infrastructure").Child("vcenters").Index(0), "must specify at least one vSphere vCenter")) } } if ibmCloud := platform.IBMCloud; ibmCloud != nil { @@ -635,6 +632,18 @@ func (a *ClusterDeploymentValidatingAdmissionHook) validateUpdate(admissionSpec // Add the new data to the contextLogger contextLogger.Data["oldObject.Name"] = oldObject.Name + // HIVE-2391 + if oldObject.Spec.Platform.VSphere != nil && cd.Spec.Platform.VSphere != nil { + // Moving from a non-zonal to a zonal shape is permitted. + // This check is faster than checking all the fields individually + if oldObject.Spec.Platform.VSphere.Infrastructure == nil && cd.Spec.Platform.VSphere.Infrastructure != nil { + contextLogger.Debug("Passed validation: HIVE-2391") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + } + hasChangedImmutableField, unsupportedDiff := hasChangedImmutableField(&oldObject.Spec, &cd.Spec) if hasChangedImmutableField { message := fmt.Sprintf("Attempted to change ClusterDeployment.Spec which is immutable except for %s fields.
Unsupported change: \n%s", strings.Join(mutableFields, ","), unsupportedDiff) diff --git a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go index ea5e50b282a..c5f11a8c4da 100644 --- a/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go +++ b/pkg/validating-webhooks/hive/v1/clusterdeployment_validating_admission_hook_test.go @@ -24,6 +24,7 @@ import ( hivev1openstack "github.com/openshift/hive/apis/hive/v1/openstack" hivev1vsphere "github.com/openshift/hive/apis/hive/v1/vsphere" hivecontractsv1alpha1 "github.com/openshift/hive/apis/hivecontracts/v1alpha1" + installervsphere "github.com/openshift/installer/pkg/types/vsphere" "github.com/openshift/hive/pkg/constants" "github.com/openshift/hive/pkg/util/contracts" @@ -131,14 +132,28 @@ func validOpenStackClusterDeployment() *hivev1.ClusterDeployment { func validVSphereClusterDeployment() *hivev1.ClusterDeployment { cd := clusterDeploymentTemplate() cd.Spec.Platform.VSphere = &hivev1vsphere.Platform{ - VCenter: "somevcenter.com", CredentialsSecretRef: corev1.LocalObjectReference{Name: "fake-creds-secret"}, CertificatesSecretRef: corev1.LocalObjectReference{Name: "fake-cert-secret"}, - Datacenter: "dc1", - DefaultDatastore: "vmse-test", - Folder: "/dc1/vm/test", - Cluster: "test", - Network: "Network", + Infrastructure: &installervsphere.Platform{ + VCenters: []installervsphere.VCenter{ + { + Server: "somevcenter.com", + Datacenters: []string{"dc1"}, + }, + }, + FailureDomains: []installervsphere.FailureDomain{ + { + Server: "somevcenter.com", + Topology: installervsphere.Topology{ + Datacenter: "dc1", + Datastore: "vmse-test", + Folder: "/dc1/vm/test", + ComputeCluster: "test", + Networks: []string{"Network"}, + }, + }, + }, + }, } return cd } diff --git a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go index 94a96c97377..0230d9b3a57 100644 --- a/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go +++ b/pkg/validating-webhooks/hive/v1/clusterpool_validating_admission_hook.go @@ -177,7 +177,7 @@ func (a *ClusterPoolValidatingAdmissionHook) validateCreate(admissionSpec *admis allErrs := field.ErrorList{} specPath := field.NewPath("spec") - allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) + allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform, contextLogger)...) if len(allErrs) > 0 { status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() @@ -234,10 +234,22 @@ func (a *ClusterPoolValidatingAdmissionHook) validateUpdate(admissionSpec *admis // Add the new data to the contextLogger contextLogger.Data["oldObject.Name"] = oldObject.Name + // HIVE-2391 + if oldObject.Spec.Platform.VSphere != nil && newObject.Spec.Platform.VSphere != nil { + // Moving from a non-zonal to a zonal shape is permitted. + // This check is faster than checking all the fields individually + if oldObject.Spec.Platform.VSphere.Infrastructure == nil && newObject.Spec.Platform.VSphere.Infrastructure != nil { + contextLogger.Debug("Passed validation: HIVE-2391") + return &admissionv1beta1.AdmissionResponse{ + Allowed: true, + } + } + } + allErrs := field.ErrorList{} specPath := field.NewPath("spec") - allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform)...) 
+ allErrs = append(allErrs, validateClusterPlatform(specPath, newObject.Spec.Platform, contextLogger)...) if len(allErrs) > 0 { contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") diff --git a/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go b/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go index dbb79378fbe..9123596a0d6 100644 --- a/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go +++ b/pkg/validating-webhooks/hive/v1/machinepool_validating_admission_hook_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + installervsphere "github.com/openshift/installer/pkg/types/vsphere" "github.com/stretchr/testify/assert" admissionv1beta1 "k8s.io/api/admission/v1beta1" @@ -702,8 +703,10 @@ func validAzureMachinePoolPlatform() *hivev1azure.MachinePool { func validvSphereMachinePoolPlatform() *hivev1vsphere.MachinePool { return &hivev1vsphere.MachinePool{ - OSDisk: hivev1vsphere.OSDisk{ - DiskSizeGB: 1, + MachinePool: installervsphere.MachinePool{ + OSDisk: hivev1vsphere.OSDisk{ + DiskSizeGB: 1, + }, }, } } diff --git a/vendor/github.com/bombsimon/logrusr/v4/.gitignore b/vendor/github.com/bombsimon/logrusr/v4/.gitignore new file mode 100644 index 00000000000..c6dd83b34c5 --- /dev/null +++ b/vendor/github.com/bombsimon/logrusr/v4/.gitignore @@ -0,0 +1,81 @@ + +# Created by https://www.gitignore.io/api/vim,macOS,IntelliJ+allb,go +# Edit at https://www.gitignore.io/?templates=vim,macOS,IntelliJ+allb,go + +### Go ### +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +### Go Patch ### +/vendor/ +/Godeps/ + +#!! ERROR: intellij+allb is undefined. 
Use list command to see defined gitignore types !!# + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### Vim ### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ + +# Auto-generated tag files +tags + +# Persistent undo +[._]*.un~ + +# Coc configuration directory +.vim + +# End of https://www.gitignore.io/api/vim,macOS,IntelliJ+allb,go diff --git a/vendor/github.com/bombsimon/logrusr/v4/LICENSE b/vendor/github.com/bombsimon/logrusr/v4/LICENSE new file mode 100644 index 00000000000..ba46c0cd16e --- /dev/null +++ b/vendor/github.com/bombsimon/logrusr/v4/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Simon Sawert + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bombsimon/logrusr/v4/README.md b/vendor/github.com/bombsimon/logrusr/v4/README.md new file mode 100644 index 00000000000..7ce274951ce --- /dev/null +++ b/vendor/github.com/bombsimon/logrusr/v4/README.md @@ -0,0 +1,35 @@ +# Logrusr + +[![Go Reference](https://pkg.go.dev/badge/github.com/bombsimon/logrusr.svg)](https://pkg.go.dev/github.com/bombsimon/logrusr/v4) +[![GitHub Actions](https://github.com/bombsimon/logrusr/actions/workflows/go.yml/badge.svg)](https://github.com/bombsimon/logrusr/actions/workflows/go.yml) +[![Coverage Status](https://coveralls.io/repos/github/bombsimon/logrusr/badge.svg?branch=main)](https://coveralls.io/github/bombsimon/logrusr?branch=main) +[![Go Report Card](https://goreportcard.com/badge/github.com/bombsimon/logrusr)](https://goreportcard.com/report/github.com/bombsimon/logrusr) + +A [logr](https://github.com/go-logr/logr) implementation using +[logrus](https://github.com/sirupsen/logrus). + +## Usage + +```go +import ( + "github.com/bombsimon/logrusr/v4" + "github.com/go-logr/logr" + "github.com/sirupsen/logrus" +) + +func main() { + logrusLog := logrus.New() + log := logrusr.New(logrusLog) + + log = log.WithName("MyName").WithValues("user", "you") + log.Info("Logr in action!", "the answer", 42) +} +``` + +For more details, see [example](example/main.go). 
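As a companion to the usage example above: the controller and webhook hunks earlier in this patch build the `logr.Logger` that `Platform.ConvertDeprecatedFields` expects by passing an existing logrus entry into `logrusr.New`. A minimal sketch of that pattern, assuming only the APIs shown in this vendored package (the field values are illustrative):

```go
package main

import (
	"github.com/bombsimon/logrusr/v4"
	"github.com/sirupsen/logrus"
)

func main() {
	// logrusr.New accepts any logrus.FieldLogger, so an *logrus.Entry that
	// already carries structured fields works the same as a *logrus.Logger.
	entry := logrus.New().WithField("controller", "clusterpool")

	// The resulting logr.Logger is the type ConvertDeprecatedFields takes.
	log := logrusr.New(entry)
	log.V(1).Info("converting deprecated vSphere fields", "name", "my-pool")
}
```

Given the `logrusDiffToInfo` offset documented in `logrusr.go` further down in this patch, the `V(1)` call surfaces as logrus debug output.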
+ +## Implementation details + +The New method takes a `logrus.FieldLogger` interface as input which means +this works with both `logrus.Logger` and `logrus.Entry`. This is currently a +quite naive implementation in early state. Use with caution. diff --git a/vendor/github.com/bombsimon/logrusr/v4/logrusr.go b/vendor/github.com/bombsimon/logrusr/v4/logrusr.go new file mode 100644 index 00000000000..3c151996937 --- /dev/null +++ b/vendor/github.com/bombsimon/logrusr/v4/logrusr.go @@ -0,0 +1,249 @@ +package logrusr + +import ( + "encoding/json" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/go-logr/logr" + "github.com/sirupsen/logrus" +) + +// According to the specification of the Logger interface calling the InfoLogger +// directly on the logger should be the same as calling them on V(0). Since +// logrus level 0 is PanicLevel and Infolevel doesn't start until V(4) we use +// this constant to be able to calculate what V(n) values should mean. +const logrusDiffToInfo = 4 + +// FormatFunc is the function to format log values with for non primitive data. +// If this is not set (default) all unknown types will be JSON marshaled and +// added as a string. +type FormatFunc func(interface{}) interface{} + +// Option is options to give when construction a logrusr logger. +type Option func(l *logrusr) + +// WithFormatter will set the FormatFunc to use. If you only want to format a +// specific type that is yours, prefer using the logr.Marshaler interface +// instead. The `FormatFunc` is better suited for types that are not yours such +// as external types, maps or slices. +func WithFormatter(f FormatFunc) Option { + return func(l *logrusr) { + l.formatter = f + } +} + +// WithReportCaller will enable reporting of the caller. +func WithReportCaller() Option { + return func(l *logrusr) { + l.reportCaller = true + } +} + +// WithName will set an initial name instead of having to call `WithName` on the +// logger itself after constructing it. +func WithName(name ...string) Option { + return func(l *logrusr) { + l.name = name + + l.logger = l.logger.WithField( + "logger", strings.Join(l.name, "."), + ) + } +} + +type logrusr struct { + name []string + depth int + reportCaller bool + logger *logrus.Entry + formatter FormatFunc +} + +// New will return a new logr.Logger created from a logrus.FieldLogger. +func New(l logrus.FieldLogger, opts ...Option) logr.Logger { + // Immediately convert the FieldLogger to an Entry so we don't have to type + // cast and can use methods that exist on the Entry but not the FieldLogger + // interface. + logger := &logrusr{ + depth: 0, + logger: l.WithFields(logrus.Fields{}), + } + + for _, o := range opts { + o(logger) + } + + return logr.New(logger) +} + +// Init receives optional information about the library. +func (l *logrusr) Init(ri logr.RuntimeInfo) { + // By default `CallDepth` is set to 1 which means one of the frames is + // skipped by default. This was originally missed in this library making the + // default behavior and `WithCallDepth(0)` behave differently. + // To be backwards compatible without affecting anyone manually setting the + // call depth we reduce 1 from the default depth instead of not adding it. + // See https://github.com/bombsimon/logrusr/issues/19 for more info. + l.depth = ri.CallDepth - 1 +} + +// Enabled tests whether this Logger is enabled. It will return true if the +// logrus.Logger has a level set to logrus.InfoLevel or higher (Warn/Panic). 
+// According to the documentation, level V(0) should be equivalent as calling +// Info() directly on the logger. To ensure this the constant `logrusDiffToInfo` +// will be added to all passed values so that V(0) creates a logger with level +// logrus.InfoLevel and V(2) would create a logger with level logrus.TraceLevel. +// This menas that if logrus is set to logrus.InfoLevel or **higher** this +// method will return true, otherwise false. +func (l *logrusr) Enabled(level int) bool { + // logrus.InfoLevel has value 4 so if the level on the logger is set to 0 we + // should only be seen as enabled if the logrus logger has a severity of + // info or higher. + return l.logger.Logger.IsLevelEnabled(logrus.Level(level + logrusDiffToInfo)) +} + +// Info logs info messages if the logger is enabled, that is if the level on the +// logger is set to logrus.InfoLevel or less. +func (l *logrusr) Info(level int, msg string, keysAndValues ...interface{}) { + log := l.logger + if c := l.caller(); c != "" { + log = log.WithField("caller", c) + } + + log. + WithFields(listToLogrusFields(l.formatter, keysAndValues...)). + Log(logrus.Level(level+logrusDiffToInfo), msg) +} + +// Error logs error messages. Since the log will be written with `Error` level +// it won't show if the severity of the underlying logrus logger is less than +// Error. +func (l *logrusr) Error(err error, msg string, keysAndValues ...interface{}) { + log := l.logger + if c := l.caller(); c != "" { + log = log.WithField("caller", c) + } + + log. + WithFields(listToLogrusFields(l.formatter, keysAndValues...)). + WithError(err). + Error(msg) +} + +// WithValues returns a new logger with additional key/values pairs. This is +// equivalent to logrus WithFields() but takes a list of even arguments +// (key/value pairs) instead of a map as input. If an odd number of arguments +// are sent all values will be discarded. +func (l *logrusr) WithValues(keysAndValues ...interface{}) logr.LogSink { + newLogger := l.copyLogger() + newLogger.logger = newLogger.logger.WithFields( + listToLogrusFields(l.formatter, keysAndValues...), + ) + + return newLogger +} + +// WithName is a part of the Logger interface. This will set the key "logger" as +// a logrus field to identify the instance. +func (l *logrusr) WithName(name string) logr.LogSink { + newLogger := l.copyLogger() + newLogger.name = append(newLogger.name, name) + + newLogger.logger = newLogger.logger.WithField( + "logger", strings.Join(newLogger.name, "."), + ) + + return newLogger +} + +// listToLogrusFields converts a list of arbitrary length to key/value paris. +func listToLogrusFields(formatter FormatFunc, keysAndValues ...interface{}) logrus.Fields { + f := make(logrus.Fields) + + // Skip all fields if it's not an even length list. + if len(keysAndValues)%2 != 0 { + return f + } + + for i := 0; i < len(keysAndValues); i += 2 { + k, v := keysAndValues[i], keysAndValues[i+1] + + s, ok := k.(string) + if !ok { + continue + } + + if v, ok := v.(logr.Marshaler); ok { + f[s] = v.MarshalLog() + continue + } + + // Try to avoid marshaling known types. 
+ switch vVal := v.(type) { + case int, int8, int16, int32, int64, + uint, uint8, uint16, uint32, uint64, + float32, float64, complex64, complex128, + string, bool: + f[s] = vVal + + case []byte: + f[s] = string(vVal) + + default: + if formatter != nil { + f[s] = formatter(v) + } else { + j, _ := json.Marshal(vVal) + f[s] = string(j) + } + } + } + + return f +} + +// copyLogger copies the logger creating a new slice of the name but preserving +// the formatter and actual logrus logger. +func (l *logrusr) copyLogger() *logrusr { + newLogger := &logrusr{ + name: make([]string, len(l.name)), + depth: l.depth, + reportCaller: l.reportCaller, + logger: l.logger.Dup(), + formatter: l.formatter, + } + + copy(newLogger.name, l.name) + + return newLogger +} + +// WithCallDepth implements the optional WithCallDepth to offset the call stack +// when reporting caller. +func (l *logrusr) WithCallDepth(depth int) logr.LogSink { + newLogger := l.copyLogger() + newLogger.depth += depth + + return newLogger +} + +// caller will return the caller of the logging method. +func (l *logrusr) caller() string { + // Check if we should even report the caller. + if !l.reportCaller { + return "" + } + + // +1 for this frame. + // +1 for frame calling here (Info/Error) + // +1 for logr frame + _, file, line, ok := runtime.Caller(l.depth + 3) + if !ok { + return "" + } + + return fmt.Sprintf("%s:%d", filepath.Base(file), line) +} diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeprovision_types.go b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeprovision_types.go index a4a6bf79d7f..414a1a66535 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeprovision_types.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/clusterdeprovision_types.go @@ -129,7 +129,10 @@ type VSphereClusterDeprovision struct { // necessary for communicating with the VCenter. CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"` // VCenter is the vSphere vCenter hostname. + // Deprecated: use VCenters instead VCenter string `json:"vCenter"` + // VCenters are potentially multiple vCenter hostnames. Prefer this field over VCenter. + VCenters []string `json:"vCenters"` } // IBMClusterDeprovision contains IBM Cloud specific configuration for a ClusterDeprovision diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/machinepools.go b/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/machinepools.go index e08c4fab713..37e27be7cf2 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/machinepools.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/machinepools.go @@ -1,33 +1,27 @@ package vsphere +import "github.com/openshift/installer/pkg/types/vsphere" + // MachinePool stores the configuration for a machine pool installed // on vSphere. type MachinePool struct { + vsphere.MachinePool `json:",inline"` + // ResourcePool is the name of the resource pool that will be used for virtual machines. // If it is not present, a default value will be used. + // Deprecated: use Topology instead // +optional - ResourcePool string `json:"resourcePool,omitempty"` - - // NumCPUs is the total number of virtual processor cores to assign a vm. - NumCPUs int32 `json:"cpus"` - - // NumCoresPerSocket is the number of cores per socket in a vm. The number - // of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. - NumCoresPerSocket int32 `json:"coresPerSocket"` - - // Memory is the size of a VM's memory in MB. 
- MemoryMiB int64 `json:"memoryMB"` - - // OSDisk defines the storage for instance. - OSDisk `json:"osDisk"` + DeprecatedResourcePool string `json:"resourcePool,omitempty"` // TagIDs is a list of up to 10 tags to add to the VMs that this machine set provisions in vSphere. + // Deprecated: use Topology instead // +kubebuilder:validation:MaxItems:=10 - TagIDs []string `json:"tagIDs,omitempty"` -} + DeprecatedTagIDs []string `json:"tagIDs,omitempty"` -// OSDisk defines the disk for a virtual machine. -type OSDisk struct { - // DiskSizeGB defines the size of disk in GB. - DiskSizeGB int32 `json:"diskSizeGB"` + // Topology is the vSphere topology that will be used for virtual machines. + // If it is not present, a default value will be used. + // +optional + Topology *vsphere.Topology `json:"topology,omitempty"` } + +type OSDisk = vsphere.OSDisk diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/platform.go b/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/platform.go index 62f1f84372d..21c05b5760a 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/platform.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/platform.go @@ -1,13 +1,24 @@ package vsphere import ( + "fmt" + "strings" + + "github.com/go-logr/logr" + "github.com/openshift/installer/pkg/types/vsphere" corev1 "k8s.io/api/core/v1" ) // Platform stores any global configuration used for vSphere platforms. type Platform struct { + // Infrastructure is the desired state of the vSphere infrastructure provider. + Infrastructure *vsphere.Platform `json:"infrastructure,omitempty"` + // VCenter is the domain name or IP address of the vCenter. - VCenter string `json:"vCenter"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedVCenter string `json:"vCenter,omitempty"` // CredentialsSecretRef refers to a secret that contains the vSphere account access // credentials: GOVC_USERNAME, GOVC_PASSWORD fields. @@ -18,18 +29,92 @@ type Platform struct { CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"` // Datacenter is the name of the datacenter to use in the vCenter. - Datacenter string `json:"datacenter"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedDatacenter string `json:"datacenter,omitempty"` // DefaultDatastore is the default datastore to use for provisioning volumes. - DefaultDatastore string `json:"defaultDatastore"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedDefaultDatastore string `json:"defaultDatastore,omitempty"` // Folder is the name of the folder that will be used and/or created for // virtual machines. - Folder string `json:"folder,omitempty"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedFolder string `json:"folder,omitempty"` // Cluster is the name of the cluster virtual machines will be cloned into. - Cluster string `json:"cluster,omitempty"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedCluster string `json:"cluster,omitempty"` // Network specifies the name of the network to be used by the cluster. 
- Network string `json:"network,omitempty"` + // Deprecated: Please use Platform.Infrastructure instead + // See also: Platform.ConvertDeprecatedFields + // +optional + DeprecatedNetwork string `json:"network,omitempty"` +} + +func (p *Platform) ConvertDeprecatedFields(logger logr.Logger) { + if p.Infrastructure != nil { + return + } + + p.Infrastructure = &vsphere.Platform{ + VCenters: []vsphere.VCenter{ + { + Server: p.DeprecatedVCenter, + Port: 443, + Datacenters: []string{p.DeprecatedDatacenter}, + }, + }, + FailureDomains: []vsphere.FailureDomain{ + { + // names from https://github.com/openshift/installer/blob/f7731922a0f17a8339a3e837f72898ac77643611/pkg/types/vsphere/conversion/installconfig.go#L58-L61 + Name: "generated-failure-domain", + Region: "generated-region", + Zone: "generated-zone", + Server: p.DeprecatedVCenter, + Topology: vsphere.Topology{ + Datacenter: p.DeprecatedDatacenter, + Datastore: setDatastorePath(p.DeprecatedDefaultDatastore, p.DeprecatedDatacenter, logger), + Folder: setFolderPath(p.DeprecatedFolder, p.DeprecatedDatacenter, logger), + ComputeCluster: setComputeClusterPath(p.DeprecatedCluster, p.DeprecatedDatacenter, logger), + Networks: []string{p.DeprecatedNetwork}, + }, + }, + }, + } + +} + +// Copied (and slightly modified) from https://github.com/openshift/installer/blob/f7731922a0f17a8339a3e837f72898ac77643611/pkg/types/vsphere/conversion/installconfig.go#L75-L97 + +func setComputeClusterPath(cluster, datacenter string, logger logr.Logger) string { + if cluster != "" && !strings.HasPrefix(cluster, "/") { + logger.V(1).Info(fmt.Sprintf("computeCluster as a non-path is now depreciated please use the form: /%s/host/%s", datacenter, cluster)) + return fmt.Sprintf("/%s/host/%s", datacenter, cluster) + } + return cluster +} + +func setDatastorePath(datastore, datacenter string, logger logr.Logger) string { + if datastore != "" && !strings.HasPrefix(datastore, "/") { + logger.V(1).Info(fmt.Sprintf("datastore as a non-path is now depreciated please use the form: /%s/datastore/%s", datacenter, datastore)) + return fmt.Sprintf("/%s/datastore/%s", datacenter, datastore) + } + return datastore +} + +func setFolderPath(folder, datacenter string, logger logr.Logger) string { + if folder != "" && !strings.HasPrefix(folder, "/") { + logger.V(1).Info(fmt.Sprintf("folder as a non-path is now depreciated please use the form: /%s/vm/%s", datacenter, folder)) + return fmt.Sprintf("/%s/vm/%s", datacenter, folder) + } + return folder } diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/zz_generated.deepcopy.go index 6108b1415a7..62008c9aeb0 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/vsphere/zz_generated.deepcopy.go @@ -5,15 +5,24 @@ package vsphere +import ( + typesvsphere "github.com/openshift/installer/pkg/types/vsphere" +) + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MachinePool) DeepCopyInto(out *MachinePool) { *out = *in - out.OSDisk = in.OSDisk - if in.TagIDs != nil { - in, out := &in.TagIDs, &out.TagIDs + in.MachinePool.DeepCopyInto(&out.MachinePool) + if in.DeprecatedTagIDs != nil { + in, out := &in.DeprecatedTagIDs, &out.DeprecatedTagIDs *out = make([]string, len(*in)) copy(*out, *in) } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(typesvsphere.Topology) + (*in).DeepCopyInto(*out) + } return } @@ -27,25 +36,14 @@ func (in *MachinePool) DeepCopy() *MachinePool { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSDisk) DeepCopyInto(out *OSDisk) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. -func (in *OSDisk) DeepCopy() *OSDisk { - if in == nil { - return nil - } - out := new(OSDisk) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Platform) DeepCopyInto(out *Platform) { *out = *in + if in.Infrastructure != nil { + in, out := &in.Infrastructure, &out.Infrastructure + *out = new(typesvsphere.Platform) + (*in).DeepCopyInto(*out) + } out.CredentialsSecretRef = in.CredentialsSecretRef out.CertificatesSecretRef = in.CertificatesSecretRef return diff --git a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go index b20056e20c1..fdf0bd54b44 100644 --- a/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go @@ -1070,7 +1070,7 @@ func (in *ClusterDeprovisionPlatform) DeepCopyInto(out *ClusterDeprovisionPlatfo if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(VSphereClusterDeprovision) - **out = **in + (*in).DeepCopyInto(*out) } if in.IBMCloud != nil { in, out := &in.IBMCloud, &out.IBMCloud @@ -3419,7 +3419,7 @@ func (in *Platform) DeepCopyInto(out *Platform) { if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(vsphere.Platform) - **out = **in + (*in).DeepCopyInto(*out) } if in.AgentBareMetal != nil { in, out := &in.AgentBareMetal, &out.AgentBareMetal @@ -4181,6 +4181,11 @@ func (in *VSphereClusterDeprovision) DeepCopyInto(out *VSphereClusterDeprovision *out = *in out.CredentialsSecretRef = in.CredentialsSecretRef out.CertificatesSecretRef = in.CertificatesSecretRef + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/openshift/installer/pkg/ipnet/ipnet.go b/vendor/github.com/openshift/installer/pkg/ipnet/ipnet.go index 40bb903ba39..62715d2e4e1 100644 --- a/vendor/github.com/openshift/installer/pkg/ipnet/ipnet.go +++ b/vendor/github.com/openshift/installer/pkg/ipnet/ipnet.go @@ -95,3 +95,22 @@ func MustParseCIDR(s string) *IPNet { } return cidr } + +// DeepCopyInto copies the receiver into out. out must be non-nil. +func (ipnet *IPNet) DeepCopyInto(out *IPNet) { + if ipnet == nil { + *out = IPNet{} + } else { + *out = *ipnet + } +} + +// DeepCopy copies the receiver, creating a new IPNet. 
+func (ipnet *IPNet) DeepCopy() *IPNet { + if ipnet == nil { + return nil + } + out := new(IPNet) + ipnet.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/aws/doc.go b/vendor/github.com/openshift/installer/pkg/types/aws/doc.go index 6d494c47651..25b9378a17c 100644 --- a/vendor/github.com/openshift/installer/pkg/types/aws/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/aws/doc.go @@ -1,5 +1,6 @@ // Package aws contains AWS-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package aws // Name is name for the AWS platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/aws/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/aws/zz_generated.deepcopy.go new file mode 100644 index 00000000000..2a18f381c87 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/aws/zz_generated.deepcopy.go @@ -0,0 +1,220 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package aws + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2Metadata) DeepCopyInto(out *EC2Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2Metadata. +func (in *EC2Metadata) DeepCopy() *EC2Metadata { + if in == nil { + return nil + } + out := new(EC2Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EC2RootVolume) DeepCopyInto(out *EC2RootVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2RootVolume. +func (in *EC2RootVolume) DeepCopy() *EC2RootVolume { + if in == nil { + return nil + } + out := new(EC2RootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.EC2RootVolume = in.EC2RootVolume + out.EC2Metadata = in.EC2Metadata + if in.AdditionalSecurityGroupIDs != nil { + in, out := &in.AdditionalSecurityGroupIDs, &out.AdditionalSecurityGroupIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]ServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = make([]map[string]string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.DeprecatedSubnets != nil { + in, out := &in.DeprecatedSubnets, &out.DeprecatedSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.VPC.DeepCopyInto(&out.VPC) + if in.UserTags != nil { + in, out := &in.UserTags, &out.UserTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]ServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.ExperimentalPropagateUserTag != nil { + in, out := &in.ExperimentalPropagateUserTag, &out.ExperimentalPropagateUserTag + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceEndpoint) DeepCopyInto(out *ServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceEndpoint. +func (in *ServiceEndpoint) DeepCopy() *ServiceEndpoint { + if in == nil { + return nil + } + out := new(ServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subnet) DeepCopyInto(out *Subnet) { + *out = *in + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]SubnetRole, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet. +func (in *Subnet) DeepCopy() *Subnet { + if in == nil { + return nil + } + out := new(Subnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetRole) DeepCopyInto(out *SubnetRole) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetRole. 
+func (in *SubnetRole) DeepCopy() *SubnetRole { + if in == nil { + return nil + } + out := new(SubnetRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VPC) DeepCopyInto(out *VPC) { + *out = *in + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]Subnet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPC. +func (in *VPC) DeepCopy() *VPC { + if in == nil { + return nil + } + out := new(VPC) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/azure/doc.go b/vendor/github.com/openshift/installer/pkg/types/azure/doc.go index 6ce852718fb..7d9ede453f7 100644 --- a/vendor/github.com/openshift/installer/pkg/types/azure/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/azure/doc.go @@ -1,5 +1,6 @@ // Package azure contains Azure-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package azure // Name is the name for the Azure platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/azure/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/azure/zz_generated.deepcopy.go new file mode 100644 index 00000000000..20955f6bc29 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/azure/zz_generated.deepcopy.go @@ -0,0 +1,352 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package azure + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootDiagnostics) DeepCopyInto(out *BootDiagnostics) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootDiagnostics. +func (in *BootDiagnostics) DeepCopy() *BootDiagnostics { + if in == nil { + return nil + } + out := new(BootDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialVM) DeepCopyInto(out *ConfidentialVM) { + *out = *in + if in.UEFISettings != nil { + in, out := &in.UEFISettings, &out.UEFISettings + *out = new(UEFISettings) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialVM. +func (in *ConfidentialVM) DeepCopy() *ConfidentialVM { + if in == nil { + return nil + } + out := new(ConfidentialVM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomerManagedKey) DeepCopyInto(out *CustomerManagedKey) { + *out = *in + out.KeyVault = in.KeyVault + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomerManagedKey. +func (in *CustomerManagedKey) DeepCopy() *CustomerManagedKey { + if in == nil { + return nil + } + out := new(CustomerManagedKey) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DiskEncryptionSet) DeepCopyInto(out *DiskEncryptionSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSet. +func (in *DiskEncryptionSet) DeepCopy() *DiskEncryptionSet { + if in == nil { + return nil + } + out := new(DiskEncryptionSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyVault) DeepCopyInto(out *KeyVault) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyVault. +func (in *KeyVault) DeepCopy() *KeyVault { + if in == nil { + return nil + } + out := new(KeyVault) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.OSDisk.DeepCopyInto(&out.OSDisk) + if in.BootDiagnostics != nil { + in, out := &in.BootDiagnostics, &out.BootDiagnostics + *out = new(BootDiagnostics) + **out = **in + } + out.OSImage = in.OSImage + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(SecuritySettings) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(VMIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(DiskEncryptionSet) + **out = **in + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(VMDiskSecurityProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImage) DeepCopyInto(out *OSImage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImage. +func (in *OSImage) DeepCopy() *OSImage { + if in == nil { + return nil + } + out := new(OSImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.UserTags != nil { + in, out := &in.UserTags, &out.UserTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CustomerManagedKey != nil { + in, out := &in.CustomerManagedKey, &out.CustomerManagedKey + *out = new(CustomerManagedKey) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecuritySettings) DeepCopyInto(out *SecuritySettings) { + *out = *in + if in.ConfidentialVM != nil { + in, out := &in.ConfidentialVM, &out.ConfidentialVM + *out = new(ConfidentialVM) + (*in).DeepCopyInto(*out) + } + if in.TrustedLaunch != nil { + in, out := &in.TrustedLaunch, &out.TrustedLaunch + *out = new(TrustedLaunch) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettings. +func (in *SecuritySettings) DeepCopy() *SecuritySettings { + if in == nil { + return nil + } + out := new(SecuritySettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustedLaunch) DeepCopyInto(out *TrustedLaunch) { + *out = *in + if in.UEFISettings != nil { + in, out := &in.UEFISettings, &out.UEFISettings + *out = new(UEFISettings) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedLaunch. +func (in *TrustedLaunch) DeepCopy() *TrustedLaunch { + if in == nil { + return nil + } + out := new(TrustedLaunch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UEFISettings) DeepCopyInto(out *UEFISettings) { + *out = *in + if in.SecureBoot != nil { + in, out := &in.SecureBoot, &out.SecureBoot + *out = new(string) + **out = **in + } + if in.VirtualizedTrustedPlatformModule != nil { + in, out := &in.VirtualizedTrustedPlatformModule, &out.VirtualizedTrustedPlatformModule + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UEFISettings. +func (in *UEFISettings) DeepCopy() *UEFISettings { + if in == nil { + return nil + } + out := new(UEFISettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAssignedIdentity) DeepCopyInto(out *UserAssignedIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAssignedIdentity. +func (in *UserAssignedIdentity) DeepCopy() *UserAssignedIdentity { + if in == nil { + return nil + } + out := new(UserAssignedIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VMDiskSecurityProfile) DeepCopyInto(out *VMDiskSecurityProfile) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(DiskEncryptionSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMDiskSecurityProfile. +func (in *VMDiskSecurityProfile) DeepCopy() *VMDiskSecurityProfile { + if in == nil { + return nil + } + out := new(VMDiskSecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMIdentity) DeepCopyInto(out *VMIdentity) { + *out = *in + if in.UserAssignedIdentities != nil { + in, out := &in.UserAssignedIdentities, &out.UserAssignedIdentities + *out = make([]UserAssignedIdentity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMIdentity. +func (in *VMIdentity) DeepCopy() *VMIdentity { + if in == nil { + return nil + } + out := new(VMIdentity) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/baremetal/doc.go b/vendor/github.com/openshift/installer/pkg/types/baremetal/doc.go index db078844da8..0b50766a51d 100644 --- a/vendor/github.com/openshift/installer/pkg/types/baremetal/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/baremetal/doc.go @@ -1,5 +1,6 @@ // Package baremetal contains baremetal-specific structures for // installer configuration and management. +// +k8s:deepcopy-gen=package package baremetal // Name is the name for the baremetal platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/baremetal/platform.go b/vendor/github.com/openshift/installer/pkg/types/baremetal/platform.go index 311faebfa61..cd1dec376c0 100644 --- a/vendor/github.com/openshift/installer/pkg/types/baremetal/platform.go +++ b/vendor/github.com/openshift/installer/pkg/types/baremetal/platform.go @@ -187,7 +187,6 @@ type Platform struct { // one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional APIVIPs []string `json:"apiVIPs,omitempty"` @@ -203,7 +202,6 @@ type Platform struct { // clusters it contains an IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional IngressVIPs []string `json:"ingressVIPs,omitempty"` @@ -247,7 +245,6 @@ type Platform struct { // AdditionalNTPServers defines a list of additional NTP servers // to use for provisioning - // +kubebuilder:validation:UniqueItems=true // +optional AdditionalNTPServers []string `json:"additionalNTPServers,omitempty"` } diff --git a/vendor/github.com/openshift/installer/pkg/types/baremetal/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/baremetal/zz_generated.deepcopy.go new file mode 100644 index 00000000000..742d51cc4d0 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/baremetal/zz_generated.deepcopy.go @@ -0,0 +1,163 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package baremetal + +import ( + configv1 "github.com/openshift/api/config/v1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BMC) DeepCopyInto(out *BMC) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMC. +func (in *BMC) DeepCopy() *BMC { + if in == nil { + return nil + } + out := new(BMC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Host) DeepCopyInto(out *Host) { + *out = *in + out.BMC = in.BMC + if in.RootDeviceHints != nil { + in, out := &in.RootDeviceHints, &out.RootDeviceHints + *out = new(RootDeviceHints) + (*in).DeepCopyInto(*out) + } + if in.NetworkConfig != nil { + in, out := &in.NetworkConfig, &out.NetworkConfig + *out = new(v1.JSON) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Host. +func (in *Host) DeepCopy() *Host { + if in == nil { + return nil + } + out := new(Host) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.ProvisioningNetworkCIDR != nil { + in, out := &in.ProvisioningNetworkCIDR, &out.ProvisioningNetworkCIDR + *out = (*in).DeepCopy() + } + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*Host, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Host) + (*in).DeepCopyInto(*out) + } + } + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + **out = **in + } + if in.APIVIPs != nil { + in, out := &in.APIVIPs, &out.APIVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressVIPs != nil { + in, out := &in.IngressVIPs, &out.IngressVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(configv1.BareMetalPlatformLoadBalancer) + **out = **in + } + if in.AdditionalNTPServers != nil { + in, out := &in.AdditionalNTPServers, &out.AdditionalNTPServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. 
+func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootDeviceHints) DeepCopyInto(out *RootDeviceHints) { + *out = *in + if in.Rotational != nil { + in, out := &in.Rotational, &out.Rotational + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootDeviceHints. +func (in *RootDeviceHints) DeepCopy() *RootDeviceHints { + if in == nil { + return nil + } + out := new(RootDeviceHints) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/doc.go b/vendor/github.com/openshift/installer/pkg/types/doc.go index 7f7f45efbc9..313d5b2be1a 100644 --- a/vendor/github.com/openshift/installer/pkg/types/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/doc.go @@ -3,4 +3,5 @@ // Package types defines structures for installer configuration and // management. +// +k8s:deepcopy-gen=package package types diff --git a/vendor/github.com/openshift/installer/pkg/types/external/doc.go b/vendor/github.com/openshift/installer/pkg/types/external/doc.go index 0418e098293..223beb1dc06 100644 --- a/vendor/github.com/openshift/installer/pkg/types/external/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/external/doc.go @@ -1,5 +1,6 @@ -// Package none contains generic structures for installer +// Package external contains generic structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package external // Name is name for the External platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/external/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/external/zz_generated.deepcopy.go new file mode 100644 index 00000000000..bb3b0b4742e --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/external/zz_generated.deepcopy.go @@ -0,0 +1,22 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package external + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/gcp/doc.go b/vendor/github.com/openshift/installer/pkg/types/gcp/doc.go index 6ea4c971e9a..b8790786219 100644 --- a/vendor/github.com/openshift/installer/pkg/types/gcp/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/gcp/doc.go @@ -1,5 +1,6 @@ // Package gcp contains GCP-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package gcp // Name is name for the gcp platform. 
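
Context note (not part of the upstream diff): the `+k8s:deepcopy-gen=package` marker added to each platform `doc.go` tells k8s.io/code-generator's deepcopy-gen to emit `DeepCopy`/`DeepCopyInto` methods for every exported type in that package; the `zz_generated.deepcopy.go` files added throughout this patch are that generator's output. A minimal sketch of how a consumer such as Hive can use the generated methods to take an independent copy of a vendored installer struct rather than aliasing it (illustrative only; the tag values are hypothetical, and the field shown, `UserTags map[string]string`, is taken from the generated azure deepcopy above):

package main

import (
	"fmt"

	"github.com/openshift/installer/pkg/types/azure"
)

func main() {
	original := &azure.Platform{
		UserTags: map[string]string{"team": "hive"},
	}
	// DeepCopy allocates a new Platform; nested maps and slices do not share
	// backing storage with the original.
	copied := original.DeepCopy()
	copied.UserTags["team"] = "other"
	fmt.Println(original.UserTags["team"]) // still "hive": the map was copied, not aliased
}

Having these methods on the vendored types lets controllers mutate a copy of a shared platform spec without risking writes back into cached objects, which is the usual reason deepcopy generation is enabled for vendored API-like packages.
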
diff --git a/vendor/github.com/openshift/installer/pkg/types/gcp/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/gcp/zz_generated.deepcopy.go new file mode 100644 index 00000000000..46b220dd29a --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/gcp/zz_generated.deepcopy.go @@ -0,0 +1,266 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package gcp + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionKeyReference) DeepCopyInto(out *EncryptionKeyReference) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(KMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionKeyReference. +func (in *EncryptionKeyReference) DeepCopy() *EncryptionKeyReference { + if in == nil { + return nil + } + out := new(EncryptionKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSKeyReference) DeepCopyInto(out *KMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSKeyReference. +func (in *KMSKeyReference) DeepCopy() *KMSKeyReference { + if in == nil { + return nil + } + out := new(KMSKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.OSDisk.DeepCopyInto(&out.OSDisk) + if in.OSImage != nil { + in, out := &in.OSImage, &out.OSImage + *out = new(OSImage) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metric) DeepCopyInto(out *Metric) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metric. 
+func (in *Metric) DeepCopy() *Metric { + if in == nil { + return nil + } + out := new(Metric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(EncryptionKeyReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSImage) DeepCopyInto(out *OSImage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImage. +func (in *OSImage) DeepCopy() *OSImage { + if in == nil { + return nil + } + out := new(OSImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.UserLabels != nil { + in, out := &in.UserLabels, &out.UserLabels + *out = make([]UserLabel, len(*in)) + copy(*out, *in) + } + if in.UserTags != nil { + in, out := &in.UserTags, &out.UserTags + *out = make([]UserTag, len(*in)) + copy(*out, *in) + } + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]v1.GCPServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Quota) DeepCopyInto(out *Quota) { + { + in := &in + *out = make(Quota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Quota. +func (in Quota) DeepCopy() Quota { + if in == nil { + return nil + } + out := new(Quota) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuotaUsage) DeepCopyInto(out *QuotaUsage) { + *out = *in + if in.Metric != nil { + in, out := &in.Metric, &out.Metric + *out = new(Metric) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuotaUsage. +func (in *QuotaUsage) DeepCopy() *QuotaUsage { + if in == nil { + return nil + } + out := new(QuotaUsage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserLabel) DeepCopyInto(out *UserLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserLabel. 
+func (in *UserLabel) DeepCopy() *UserLabel { + if in == nil { + return nil + } + out := new(UserLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserTag) DeepCopyInto(out *UserTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserTag. +func (in *UserTag) DeepCopy() *UserTag { + if in == nil { + return nil + } + out := new(UserTag) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/ibmcloud/doc.go b/vendor/github.com/openshift/installer/pkg/types/ibmcloud/doc.go index fd470986ff7..d0c60af0191 100644 --- a/vendor/github.com/openshift/installer/pkg/types/ibmcloud/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/ibmcloud/doc.go @@ -1,5 +1,6 @@ // Package ibmcloud contains IBM Cloud-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package ibmcloud // Name is name for the ibmcloud platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/ibmcloud/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/ibmcloud/zz_generated.deepcopy.go new file mode 100644 index 00000000000..b472b34a240 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/ibmcloud/zz_generated.deepcopy.go @@ -0,0 +1,246 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package ibmcloud + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootVolume) DeepCopyInto(out *BootVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootVolume. +func (in *BootVolume) DeepCopy() *BootVolume { + if in == nil { + return nil + } + out := new(BootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHost) DeepCopyInto(out *DedicatedHost) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHost. +func (in *DedicatedHost) DeepCopy() *DedicatedHost { + if in == nil { + return nil + } + out := new(DedicatedHost) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsJSON) DeepCopyInto(out *EndpointsJSON) { + *out = *in + if in.IBMCloudEndpointCIS != nil { + in, out := &in.IBMCloudEndpointCIS, &out.IBMCloudEndpointCIS + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointCOS != nil { + in, out := &in.IBMCloudEndpointCOS, &out.IBMCloudEndpointCOS + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointCOSConfig != nil { + in, out := &in.IBMCloudEndpointCOSConfig, &out.IBMCloudEndpointCOSConfig + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointDNSServices != nil { + in, out := &in.IBMCloudEndpointDNSServices, &out.IBMCloudEndpointDNSServices + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointGlobalCatalog != nil { + in, out := &in.IBMCloudEndpointGlobalCatalog, &out.IBMCloudEndpointGlobalCatalog + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointGlobalSearch != nil { + in, out := &in.IBMCloudEndpointGlobalSearch, &out.IBMCloudEndpointGlobalSearch + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointGlobalTagging != nil { + in, out := &in.IBMCloudEndpointGlobalTagging, &out.IBMCloudEndpointGlobalTagging + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointHyperProtect != nil { + in, out := &in.IBMCloudEndpointHyperProtect, &out.IBMCloudEndpointHyperProtect + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointIAM != nil { + in, out := &in.IBMCloudEndpointIAM, &out.IBMCloudEndpointIAM + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointKeyProtect != nil { + in, out := &in.IBMCloudEndpointKeyProtect, &out.IBMCloudEndpointKeyProtect + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointResourceController != nil { + in, out := &in.IBMCloudEndpointResourceController, &out.IBMCloudEndpointResourceController + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointResourceManager != nil { + in, out := &in.IBMCloudEndpointResourceManager, &out.IBMCloudEndpointResourceManager + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + if in.IBMCloudEndpointVPC != nil { + in, out := &in.IBMCloudEndpointVPC, &out.IBMCloudEndpointVPC + *out = new(EndpointsVisibility) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsJSON. +func (in *EndpointsJSON) DeepCopy() *EndpointsJSON { + if in == nil { + return nil + } + out := new(EndpointsJSON) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsVisibility) DeepCopyInto(out *EndpointsVisibility) { + *out = *in + if in.Private != nil { + in, out := &in.Private, &out.Private + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Public != nil { + in, out := &in.Public, &out.Public + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsVisibility. 
+func (in *EndpointsVisibility) DeepCopy() *EndpointsVisibility { + if in == nil { + return nil + } + out := new(EndpointsVisibility) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BootVolume != nil { + in, out := &in.BootVolume, &out.BootVolume + *out = new(BootVolume) + **out = **in + } + if in.DedicatedHosts != nil { + in, out := &in.DedicatedHosts, &out.DedicatedHosts + *out = make([]DedicatedHost, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]v1.IBMCloudServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.ControlPlaneSubnets != nil { + in, out := &in.ControlPlaneSubnets, &out.ControlPlaneSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ComputeSubnets != nil { + in, out := &in.ComputeSubnets, &out.ComputeSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]v1.IBMCloudServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/installconfig.go b/vendor/github.com/openshift/installer/pkg/types/installconfig.go index cc3c953850c..1f6ccb161cf 100644 --- a/vendor/github.com/openshift/installer/pkg/types/installconfig.go +++ b/vendor/github.com/openshift/installer/pkg/types/installconfig.go @@ -88,6 +88,7 @@ const ( ) //go:generate go run ../../vendor/sigs.k8s.io/controller-tools/cmd/controller-gen crd:crdVersions=v1 paths=. output:dir=../../data/data/ +//go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen --output-file zz_generated.deepcopy.go ./... // InstallConfig is the configuration for an OpenShift install. 
type InstallConfig struct { diff --git a/vendor/github.com/openshift/installer/pkg/types/none/doc.go b/vendor/github.com/openshift/installer/pkg/types/none/doc.go index e16709984b6..51f98192ea8 100644 --- a/vendor/github.com/openshift/installer/pkg/types/none/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/none/doc.go @@ -1,5 +1,6 @@ // Package none contains generic structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package none // Name is name for the None platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/none/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/none/zz_generated.deepcopy.go new file mode 100644 index 00000000000..e474004e81a --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/none/zz_generated.deepcopy.go @@ -0,0 +1,22 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package none + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/nutanix/doc.go b/vendor/github.com/openshift/installer/pkg/types/nutanix/doc.go index 96939d2566c..a79ba008bf7 100644 --- a/vendor/github.com/openshift/installer/pkg/types/nutanix/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/nutanix/doc.go @@ -1,5 +1,6 @@ // Package nutanix contains Nutanix-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package nutanix // Name is the platform in the package diff --git a/vendor/github.com/openshift/installer/pkg/types/nutanix/platform.go b/vendor/github.com/openshift/installer/pkg/types/nutanix/platform.go index 2778dceda3e..1f31b09730a 100644 --- a/vendor/github.com/openshift/installer/pkg/types/nutanix/platform.go +++ b/vendor/github.com/openshift/installer/pkg/types/nutanix/platform.go @@ -45,7 +45,6 @@ type Platform struct { // it contains an IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional APIVIPs []string `json:"apiVIPs,omitempty"` @@ -61,7 +60,6 @@ type Platform struct { // it contains an IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional IngressVIPs []string `json:"ingressVIPs,omitempty"` diff --git a/vendor/github.com/openshift/installer/pkg/types/nutanix/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/nutanix/zz_generated.deepcopy.go new file mode 100644 index 00000000000..af5d0086bcb --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/nutanix/zz_generated.deepcopy.go @@ -0,0 +1,299 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package nutanix + +import ( + configv1 "github.com/openshift/api/config/v1" + v1 "github.com/openshift/api/machine/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDisk) DeepCopyInto(out *DataDisk) { + *out = *in + out.DiskSize = in.DiskSize.DeepCopy() + if in.DeviceProperties != nil { + in, out := &in.DeviceProperties, &out.DeviceProperties + *out = new(v1.NutanixVMDiskDeviceProperties) + **out = **in + } + if in.StorageConfig != nil { + in, out := &in.StorageConfig, &out.StorageConfig + *out = new(StorageConfig) + (*in).DeepCopyInto(*out) + } + if in.DataSourceImage != nil { + in, out := &in.DataSourceImage, &out.DataSourceImage + *out = new(StorageResourceReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDisk. +func (in *DataDisk) DeepCopy() *DataDisk { + if in == nil { + return nil + } + out := new(DataDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailureDomain) DeepCopyInto(out *FailureDomain) { + *out = *in + out.PrismElement = in.PrismElement + if in.SubnetUUIDs != nil { + in, out := &in.SubnetUUIDs, &out.SubnetUUIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StorageContainers != nil { + in, out := &in.StorageContainers, &out.StorageContainers + *out = make([]StorageResourceReference, len(*in)) + copy(*out, *in) + } + if in.DataSourceImages != nil { + in, out := &in.DataSourceImages, &out.DataSourceImages + *out = make([]StorageResourceReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailureDomain. +func (in *FailureDomain) DeepCopy() *FailureDomain { + if in == nil { + return nil + } + out := new(FailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + out.OSDisk = in.OSDisk + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(v1.NutanixResourceIdentifier) + (*in).DeepCopyInto(*out) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]v1.NutanixCategory, len(*in)) + copy(*out, *in) + } + if in.GPUs != nil { + in, out := &in.GPUs, &out.GPUs + *out = make([]v1.NutanixGPU, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. 
+func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + out.PrismCentral = in.PrismCentral + if in.PrismElements != nil { + in, out := &in.PrismElements, &out.PrismElements + *out = make([]PrismElement, len(*in)) + copy(*out, *in) + } + if in.APIVIPs != nil { + in, out := &in.APIVIPs, &out.APIVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressVIPs != nil { + in, out := &in.IngressVIPs, &out.IngressVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.SubnetUUIDs != nil { + in, out := &in.SubnetUUIDs, &out.SubnetUUIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(configv1.NutanixPlatformLoadBalancer) + **out = **in + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PrismAPICallTimeout != nil { + in, out := &in.PrismAPICallTimeout, &out.PrismAPICallTimeout + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrismCentral) DeepCopyInto(out *PrismCentral) { + *out = *in + out.Endpoint = in.Endpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrismCentral. +func (in *PrismCentral) DeepCopy() *PrismCentral { + if in == nil { + return nil + } + out := new(PrismCentral) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrismElement) DeepCopyInto(out *PrismElement) { + *out = *in + out.Endpoint = in.Endpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrismElement. +func (in *PrismElement) DeepCopy() *PrismElement { + if in == nil { + return nil + } + out := new(PrismElement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrismEndpoint) DeepCopyInto(out *PrismEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrismEndpoint. 
+func (in *PrismEndpoint) DeepCopy() *PrismEndpoint { + if in == nil { + return nil + } + out := new(PrismEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageConfig) DeepCopyInto(out *StorageConfig) { + *out = *in + if in.StorageContainer != nil { + in, out := &in.StorageContainer, &out.StorageContainer + *out = new(StorageResourceReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageConfig. +func (in *StorageConfig) DeepCopy() *StorageConfig { + if in == nil { + return nil + } + out := new(StorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageResourceReference) DeepCopyInto(out *StorageResourceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageResourceReference. +func (in *StorageResourceReference) DeepCopy() *StorageResourceReference { + if in == nil { + return nil + } + out := new(StorageResourceReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/openstack/doc.go b/vendor/github.com/openshift/installer/pkg/types/openstack/doc.go index d7fc4d6a88d..8deb3fb97aa 100644 --- a/vendor/github.com/openshift/installer/pkg/types/openstack/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/openstack/doc.go @@ -1,5 +1,6 @@ // Package openstack contains OpenStack-specific structures for // installer configuration and management. +// +k8s:deepcopy-gen=package package openstack // Name is the name for the Openstack platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/openstack/platform.go b/vendor/github.com/openshift/installer/pkg/types/openstack/platform.go index e94b943b1d0..82e83107909 100644 --- a/vendor/github.com/openshift/installer/pkg/types/openstack/platform.go +++ b/vendor/github.com/openshift/installer/pkg/types/openstack/platform.go @@ -88,7 +88,6 @@ type Platform struct { // CIDR // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional APIVIPs []string `json:"apiVIPs,omitempty"` @@ -108,7 +107,6 @@ type Platform struct { // CIDR // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional IngressVIPs []string `json:"ingressVIPs,omitempty"` diff --git a/vendor/github.com/openshift/installer/pkg/types/openstack/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/openstack/zz_generated.deepcopy.go new file mode 100644 index 00000000000..a3dfb4b4d02 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/openstack/zz_generated.deepcopy.go @@ -0,0 +1,219 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package openstack + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedIP) DeepCopyInto(out *FixedIP) { + *out = *in + out.Subnet = in.Subnet + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedIP. 
+func (in *FixedIP) DeepCopy() *FixedIP { + if in == nil { + return nil + } + out := new(FixedIP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(RootVolume) + (*in).DeepCopyInto(*out) + } + if in.AdditionalNetworkIDs != nil { + in, out := &in.AdditionalNetworkIDs, &out.AdditionalNetworkIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdditionalSecurityGroupIDs != nil { + in, out := &in.AdditionalSecurityGroupIDs, &out.AdditionalSecurityGroupIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.Identifier != nil { + in, out := &in.Identifier, &out.Identifier + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkFilter) DeepCopyInto(out *NetworkFilter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkFilter. +func (in *NetworkFilter) DeepCopy() *NetworkFilter { + if in == nil { + return nil + } + out := new(NetworkFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.ExternalDNS != nil { + in, out := &in.ExternalDNS, &out.ExternalDNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterOSImageProperties != nil { + in, out := &in.ClusterOSImageProperties, &out.ClusterOSImageProperties + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.APIVIPs != nil { + in, out := &in.APIVIPs, &out.APIVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressVIPs != nil { + in, out := &in.IngressVIPs, &out.IngressVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ControlPlanePort != nil { + in, out := &in.ControlPlanePort, &out.ControlPlanePort + *out = new(PortTarget) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(v1.OpenStackPlatformLoadBalancer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortTarget) DeepCopyInto(out *PortTarget) { + *out = *in + out.Network = in.Network + if in.FixedIPs != nil { + in, out := &in.FixedIPs, &out.FixedIPs + *out = make([]FixedIP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortTarget. +func (in *PortTarget) DeepCopy() *PortTarget { + if in == nil { + return nil + } + out := new(PortTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootVolume) DeepCopyInto(out *RootVolume) { + *out = *in + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume. +func (in *RootVolume) DeepCopy() *RootVolume { + if in == nil { + return nil + } + out := new(RootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter. +func (in *SubnetFilter) DeepCopy() *SubnetFilter { + if in == nil { + return nil + } + out := new(SubnetFilter) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/ovirt/doc.go b/vendor/github.com/openshift/installer/pkg/types/ovirt/doc.go index 1e1bbef2e17..ae1f154b506 100644 --- a/vendor/github.com/openshift/installer/pkg/types/ovirt/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/ovirt/doc.go @@ -1,5 +1,6 @@ // Package ovirt contains ovirt-specific structures for // installer configuration and management. 
+// +k8s:deepcopy-gen=package package ovirt // Name is the name for the ovirt platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/ovirt/platform.go b/vendor/github.com/openshift/installer/pkg/types/ovirt/platform.go index d54e6cf500e..8e3194d5939 100644 --- a/vendor/github.com/openshift/installer/pkg/types/ovirt/platform.go +++ b/vendor/github.com/openshift/installer/pkg/types/ovirt/platform.go @@ -36,7 +36,6 @@ type Platform struct { // IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional APIVIPs []string `json:"api_vips,omitempty"` @@ -55,7 +54,6 @@ type Platform struct { // IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional IngressVIPs []string `json:"ingress_vips,omitempty"` diff --git a/vendor/github.com/openshift/installer/pkg/types/ovirt/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/ovirt/zz_generated.deepcopy.go new file mode 100644 index 00000000000..5eb9b8ef056 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/ovirt/zz_generated.deepcopy.go @@ -0,0 +1,156 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package ovirt + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AffinityGroup) DeepCopyInto(out *AffinityGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AffinityGroup. +func (in *AffinityGroup) DeepCopy() *AffinityGroup { + if in == nil { + return nil + } + out := new(AffinityGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPU) DeepCopyInto(out *CPU) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. +func (in *CPU) DeepCopy() *CPU { + if in == nil { + return nil + } + out := new(CPU) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Disk) DeepCopyInto(out *Disk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disk. +func (in *Disk) DeepCopy() *Disk { + if in == nil { + return nil + } + out := new(Disk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(CPU) + **out = **in + } + if in.OSDisk != nil { + in, out := &in.OSDisk, &out.OSDisk + *out = new(Disk) + **out = **in + } + if in.AffinityGroupsNames != nil { + in, out := &in.AffinityGroupsNames, &out.AffinityGroupsNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Clone != nil { + in, out := &in.Clone, &out.Clone + *out = new(bool) + **out = **in + } + if in.Sparse != nil { + in, out := &in.Sparse, &out.Sparse + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.APIVIPs != nil { + in, out := &in.APIVIPs, &out.APIVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressVIPs != nil { + in, out := &in.IngressVIPs, &out.IngressVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.AffinityGroups != nil { + in, out := &in.AffinityGroups, &out.AffinityGroups + *out = make([]AffinityGroup, len(*in)) + copy(*out, *in) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(v1.OvirtPlatformLoadBalancer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/powervs/doc.go b/vendor/github.com/openshift/installer/pkg/types/powervs/doc.go index 7f0864590c0..cf8977c5a4e 100644 --- a/vendor/github.com/openshift/installer/pkg/types/powervs/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/powervs/doc.go @@ -1,5 +1,6 @@ // Package powervs contains Power VS-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package powervs // Name is name for the Power VS platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/powervs/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/powervs/zz_generated.deepcopy.go new file mode 100644 index 00000000000..adc91f5214b --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/powervs/zz_generated.deepcopy.go @@ -0,0 +1,133 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package powervs + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.VolumeIDs != nil { + in, out := &in.VolumeIDs, &out.VolumeIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Processors = in.Processors + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]v1.PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.VPCSubnets != nil { + in, out := &in.VPCSubnets, &out.VPCSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]v1.PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Region) DeepCopyInto(out *Region) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make(map[string]Zone, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.VPCZones != nil { + in, out := &in.VPCZones, &out.VPCZones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Region. +func (in *Region) DeepCopy() *Region { + if in == nil { + return nil + } + out := new(Region) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Zone) DeepCopyInto(out *Zone) { + *out = *in + if in.SysTypes != nil { + in, out := &in.SysTypes, &out.SysTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zone. 
+func (in *Zone) DeepCopy() *Zone { + if in == nil { + return nil + } + out := new(Zone) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go b/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go index 0917509a013..c168fcaa0c0 100644 --- a/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go +++ b/vendor/github.com/openshift/installer/pkg/types/vsphere/doc.go @@ -1,5 +1,6 @@ // Package vsphere contains vSphere-specific structures for installer // configuration and management. +// +k8s:deepcopy-gen=package package vsphere // Name is name for the vsphere platform. diff --git a/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go b/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go index 7c1c81bf0c1..f8bed1eaf04 100644 --- a/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go +++ b/vendor/github.com/openshift/installer/pkg/types/vsphere/platform.go @@ -93,7 +93,6 @@ type Platform struct { // it contains an IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional APIVIPs []string `json:"apiVIPs,omitempty"` @@ -109,7 +108,6 @@ type Platform struct { // contains an IPv4 and IPv6 address, otherwise only one VIP // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:UniqueItems=true // +kubebuilder:validation:Format=ip // +optional IngressVIPs []string `json:"ingressVIPs,omitempty"` diff --git a/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go new file mode 100644 index 00000000000..7f529e47620 --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/vsphere/zz_generated.deepcopy.go @@ -0,0 +1,283 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package vsphere + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDisk) DeepCopyInto(out *DataDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDisk. +func (in *DataDisk) DeepCopy() *DataDisk { + if in == nil { + return nil + } + out := new(DataDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FailureDomain) DeepCopyInto(out *FailureDomain) { + *out = *in + in.Topology.DeepCopyInto(&out.Topology) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailureDomain. +func (in *FailureDomain) DeepCopy() *FailureDomain { + if in == nil { + return nil + } + out := new(FailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Host) DeepCopyInto(out *Host) { + *out = *in + if in.NetworkDevice != nil { + in, out := &in.NetworkDevice, &out.NetworkDevice + *out = new(NetworkDeviceSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Host. 
+func (in *Host) DeepCopy() *Host { + if in == nil { + return nil + } + out := new(Host) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + out.OSDisk = in.OSDisk + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDisk, len(*in)) + copy(*out, *in) + } + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metadata) DeepCopyInto(out *Metadata) { + *out = *in + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]VCenters, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata. +func (in *Metadata) DeepCopy() *Metadata { + if in == nil { + return nil + } + out := new(Metadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) { + *out = *in + if in.IPAddrs != nil { + in, out := &in.IPAddrs, &out.IPAddrs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec. +func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec { + if in == nil { + return nil + } + out := new(NetworkDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.APIVIPs != nil { + in, out := &in.APIVIPs, &out.APIVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IngressVIPs != nil { + in, out := &in.IngressVIPs, &out.IngressVIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultMachinePlatform != nil { + in, out := &in.DefaultMachinePlatform, &out.DefaultMachinePlatform + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.VCenters != nil { + in, out := &in.VCenters, &out.VCenters + *out = make([]VCenter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make([]FailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeNetworking != nil { + in, out := &in.NodeNetworking, &out.NodeNetworking + *out = new(v1.VSpherePlatformNodeNetworking) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(v1.VSpherePlatformLoadBalancer) + **out = **in + } + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]*Host, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Host) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Topology) DeepCopyInto(out *Topology) { + *out = *in + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TagIDs != nil { + in, out := &in.TagIDs, &out.TagIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Topology. +func (in *Topology) DeepCopy() *Topology { + if in == nil { + return nil + } + out := new(Topology) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenter) DeepCopyInto(out *VCenter) { + *out = *in + if in.Datacenters != nil { + in, out := &in.Datacenters, &out.Datacenters + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenter. +func (in *VCenter) DeepCopy() *VCenter { + if in == nil { + return nil + } + out := new(VCenter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VCenters) DeepCopyInto(out *VCenters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VCenters. 
+func (in *VCenters) DeepCopy() *VCenters { + if in == nil { + return nil + } + out := new(VCenters) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/installer/pkg/types/zz_generated.deepcopy.go b/vendor/github.com/openshift/installer/pkg/types/zz_generated.deepcopy.go new file mode 100644 index 00000000000..a6f211cf3df --- /dev/null +++ b/vendor/github.com/openshift/installer/pkg/types/zz_generated.deepcopy.go @@ -0,0 +1,673 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package types + +import ( + v1 "github.com/openshift/api/config/v1" + ipnet "github.com/openshift/installer/pkg/ipnet" + aws "github.com/openshift/installer/pkg/types/aws" + azure "github.com/openshift/installer/pkg/types/azure" + baremetal "github.com/openshift/installer/pkg/types/baremetal" + external "github.com/openshift/installer/pkg/types/external" + gcp "github.com/openshift/installer/pkg/types/gcp" + ibmcloud "github.com/openshift/installer/pkg/types/ibmcloud" + none "github.com/openshift/installer/pkg/types/none" + nutanix "github.com/openshift/installer/pkg/types/nutanix" + openstack "github.com/openshift/installer/pkg/types/openstack" + ovirt "github.com/openshift/installer/pkg/types/ovirt" + powervs "github.com/openshift/installer/pkg/types/powervs" + vsphere "github.com/openshift/installer/pkg/types/vsphere" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapInPlace) DeepCopyInto(out *BootstrapInPlace) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapInPlace. +func (in *BootstrapInPlace) DeepCopy() *BootstrapInPlace { + if in == nil { + return nil + } + out := new(BootstrapInPlace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.AdditionalEnabledCapabilities != nil { + in, out := &in.AdditionalEnabledCapabilities, &out.AdditionalEnabledCapabilities + *out = make([]v1.ClusterVersionCapability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMetadata) DeepCopyInto(out *ClusterMetadata) { + *out = *in + in.ClusterPlatformMetadata.DeepCopyInto(&out.ClusterPlatformMetadata) + if in.CustomFeatureSet != nil { + in, out := &in.CustomFeatureSet, &out.CustomFeatureSet + *out = new(v1.CustomFeatureGates) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMetadata. +func (in *ClusterMetadata) DeepCopy() *ClusterMetadata { + if in == nil { + return nil + } + out := new(ClusterMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + in.CIDR.DeepCopyInto(&out.CIDR) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterPlatformMetadata) DeepCopyInto(out *ClusterPlatformMetadata) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(aws.Metadata) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(openstack.Metadata) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(azure.Metadata) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(gcp.Metadata) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(ibmcloud.Metadata) + (*in).DeepCopyInto(*out) + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(baremetal.Metadata) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(ovirt.Metadata) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(powervs.Metadata) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(vsphere.Metadata) + (*in).DeepCopyInto(*out) + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(nutanix.Metadata) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPlatformMetadata. +func (in *ClusterPlatformMetadata) DeepCopy() *ClusterPlatformMetadata { + if in == nil { + return nil + } + out := new(ClusterPlatformMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterQuota) DeepCopyInto(out *ClusterQuota) { + *out = *in + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(gcp.Quota) + if **in != nil { + in, out := *in, *out + *out = make([]gcp.QuotaUsage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterQuota. +func (in *ClusterQuota) DeepCopy() *ClusterQuota { + if in == nil { + return nil + } + out := new(ClusterQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Credential) DeepCopyInto(out *Credential) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Credential. +func (in *Credential) DeepCopy() *Credential { + if in == nil { + return nil + } + out := new(Credential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Fencing) DeepCopyInto(out *Fencing) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make([]*Credential, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Credential) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fencing. +func (in *Fencing) DeepCopy() *Fencing { + if in == nil { + return nil + } + out := new(Fencing) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPv4OVNKubernetesConfig) DeepCopyInto(out *IPv4OVNKubernetesConfig) { + *out = *in + if in.InternalJoinSubnet != nil { + in, out := &in.InternalJoinSubnet, &out.InternalJoinSubnet + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4OVNKubernetesConfig. +func (in *IPv4OVNKubernetesConfig) DeepCopy() *IPv4OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(IPv4OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentSource) DeepCopyInto(out *ImageContentSource) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentSource. +func (in *ImageContentSource) DeepCopy() *ImageContentSource { + if in == nil { + return nil + } + out := new(ImageContentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageDigestSource) DeepCopyInto(out *ImageDigestSource) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageDigestSource. +func (in *ImageDigestSource) DeepCopy() *ImageDigestSource { + if in == nil { + return nil + } + out := new(ImageDigestSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstallConfig) DeepCopyInto(out *InstallConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Networking != nil { + in, out := &in.Networking, &out.Networking + *out = new(Networking) + (*in).DeepCopyInto(*out) + } + if in.ControlPlane != nil { + in, out := &in.ControlPlane, &out.ControlPlane + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.Arbiter != nil { + in, out := &in.Arbiter, &out.Arbiter + *out = new(MachinePool) + (*in).DeepCopyInto(*out) + } + if in.Compute != nil { + in, out := &in.Compute, &out.Compute + *out = make([]MachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Platform.DeepCopyInto(&out.Platform) + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = new(Proxy) + **out = **in + } + if in.DeprecatedImageContentSources != nil { + in, out := &in.DeprecatedImageContentSources, &out.DeprecatedImageContentSources + *out = make([]ImageContentSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageDigestSources != nil { + in, out := &in.ImageDigestSources, &out.ImageDigestSources + *out = make([]ImageDigestSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OperatorPublishingStrategy != nil { + in, out := &in.OperatorPublishingStrategy, &out.OperatorPublishingStrategy + *out = new(OperatorPublishingStrategy) + **out = **in + } + if in.BootstrapInPlace != nil { + in, out := &in.BootstrapInPlace, &out.BootstrapInPlace + *out = new(BootstrapInPlace) + **out = **in + } + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallConfig. +func (in *InstallConfig) DeepCopy() *InstallConfig { + if in == nil { + return nil + } + out := new(InstallConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineNetworkEntry) DeepCopyInto(out *MachineNetworkEntry) { + *out = *in + in.CIDR.DeepCopyInto(&out.CIDR) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineNetworkEntry. +func (in *MachineNetworkEntry) DeepCopy() *MachineNetworkEntry { + if in == nil { + return nil + } + out := new(MachineNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePool) DeepCopyInto(out *MachinePool) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int64) + **out = **in + } + in.Platform.DeepCopyInto(&out.Platform) + if in.Fencing != nil { + in, out := &in.Fencing, &out.Fencing + *out = new(Fencing) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool. +func (in *MachinePool) DeepCopy() *MachinePool { + if in == nil { + return nil + } + out := new(MachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachinePoolPlatform) DeepCopyInto(out *MachinePoolPlatform) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(aws.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(azure.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(baremetal.MachinePool) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(gcp.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(ibmcloud.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(openstack.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(vsphere.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(ovirt.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(powervs.MachinePool) + (*in).DeepCopyInto(*out) + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(nutanix.MachinePool) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolPlatform. +func (in *MachinePoolPlatform) DeepCopy() *MachinePoolPlatform { + if in == nil { + return nil + } + out := new(MachinePoolPlatform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Networking) DeepCopyInto(out *Networking) { + *out = *in + if in.MachineNetwork != nil { + in, out := &in.MachineNetwork, &out.MachineNetwork + *out = make([]MachineNetworkEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]ipnet.IPNet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OVNKubernetesConfig != nil { + in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig + *out = new(OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + if in.DeprecatedMachineCIDR != nil { + in, out := &in.DeprecatedMachineCIDR, &out.DeprecatedMachineCIDR + *out = (*in).DeepCopy() + } + if in.DeprecatedServiceCIDR != nil { + in, out := &in.DeprecatedServiceCIDR, &out.DeprecatedServiceCIDR + *out = (*in).DeepCopy() + } + if in.DeprecatedClusterNetworks != nil { + in, out := &in.DeprecatedClusterNetworks, &out.DeprecatedClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Networking. +func (in *Networking) DeepCopy() *Networking { + if in == nil { + return nil + } + out := new(Networking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in + if in.IPv4 != nil { + in, out := &in.IPv4, &out.IPv4 + *out = new(IPv4OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorPublishingStrategy) DeepCopyInto(out *OperatorPublishingStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorPublishingStrategy. +func (in *OperatorPublishingStrategy) DeepCopy() *OperatorPublishingStrategy { + if in == nil { + return nil + } + out := new(OperatorPublishingStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(aws.Platform) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(azure.Platform) + (*in).DeepCopyInto(*out) + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(baremetal.Platform) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(gcp.Platform) + (*in).DeepCopyInto(*out) + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(ibmcloud.Platform) + (*in).DeepCopyInto(*out) + } + if in.None != nil { + in, out := &in.None, &out.None + *out = new(none.Platform) + **out = **in + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(external.Platform) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(openstack.Platform) + (*in).DeepCopyInto(*out) + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(powervs.Platform) + (*in).DeepCopyInto(*out) + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(vsphere.Platform) + (*in).DeepCopyInto(*out) + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(ovirt.Platform) + (*in).DeepCopyInto(*out) + } + if in.Nutanix != nil { + in, out := &in.Nutanix, &out.Nutanix + *out = new(nutanix.Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Proxy) DeepCopyInto(out *Proxy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy. 
+func (in *Proxy) DeepCopy() *Proxy { + if in == nil { + return nil + } + out := new(Proxy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4a948b571a3..4e2d03a1f51 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -642,6 +642,9 @@ github.com/blang/semver/v4 # github.com/blizzy78/varnamelen v0.8.0 ## explicit; go 1.16 github.com/blizzy78/varnamelen +# github.com/bombsimon/logrusr/v4 v4.1.0 +## explicit; go 1.17 +github.com/bombsimon/logrusr/v4 # github.com/bombsimon/wsl/v4 v4.5.0 ## explicit; go 1.22 github.com/bombsimon/wsl/v4 @@ -1860,7 +1863,7 @@ github.com/openshift/hive/apis/hive/v1/vsphere github.com/openshift/hive/apis/hivecontracts/v1alpha1 github.com/openshift/hive/apis/hiveinternal/v1alpha1 github.com/openshift/hive/apis/scheme -# github.com/openshift/installer v1.4.19-ec5 +# github.com/openshift/installer v1.4.19-ec5 => github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca ## explicit; go 1.23.2 github.com/openshift/installer/data github.com/openshift/installer/pkg/asset @@ -4107,3 +4110,4 @@ sigs.k8s.io/yaml/goyaml.v3 # github.com/golang-jwt/jwt/v4 => github.com/golang-jwt/jwt/v4 v4.5.2 # golang.org/x/net => golang.org/x/net v0.38.0 # sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.3 +# github.com/openshift/installer => github.com/dlom/installer v0.0.0-20251023182801-c056b7bdd6ca