From 2df0503af48d856397c2e10de59f1e5ba4324f18 Mon Sep 17 00:00:00 2001
From: Dale Smith
Date: Thu, 8 Feb 2024 17:00:58 +1300
Subject: [PATCH] Add OpenStackServerGroup CRD and Controller

Implement a new OpenStackServerGroup CRD in v1beta1 to allow managed
server groups with the standard policies, and add a ServerGroupRef
field to OpenStackMachine that references the new CRD and uses the
resolved server group for VM creation.
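
A minimal usage example (the names and the chosen policy below are
placeholders, not part of this patch):

  apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
  kind: OpenStackServerGroup
  metadata:
    name: worker-servergroup
  spec:
    policy: soft-anti-affinity

A machine (or the cluster bastion) can then reference it through the
new field, which has lower priority than spec.serverGroup:

  spec:
    serverGroupRef:
      name: worker-servergroup

The referenced OpenStackServerGroup must be in the same namespace. The
machine controller resolves the reference to the OpenStackServerGroup's
reported status.uuid (once status.ready is true) and passes that server
group ID to OpenStack when creating the server.
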
Closes: https://github.com/kubernetes-sigs/cluster-api-provider-openstack/issues/1256
---
PROJECT | 3 +
api/v1alpha6/zz_generated.conversion.go | 1 +
api/v1alpha7/zz_generated.conversion.go | 1 +
api/v1beta1/openstackmachine_types.go | 4 +
api/v1beta1/openstackservergroup_types.go | 72 ++++
api/v1beta1/types.go | 8 +-
api/v1beta1/zz_generated.deepcopy.go | 114 ++++++
...re.cluster.x-k8s.io_openstackclusters.yaml | 14 +-
...er.x-k8s.io_openstackclustertemplates.yaml | 12 +
...re.cluster.x-k8s.io_openstackmachines.yaml | 14 +-
...er.x-k8s.io_openstackmachinetemplates.yaml | 11 +
...luster.x-k8s.io_openstackservergroups.yaml | 85 ++++
config/crd/kustomization.yaml | 2 +
.../cainjection_in_openstackservergroups.yaml | 8 +
.../webhook_in_openstackservergroups.yaml | 17 +
.../openstackservergroup_editor_role.yaml | 24 ++
.../openstackservergroup_viewer_role.yaml | 20 +
config/rbac/role.yaml | 20 +
controllers/openstackcluster_controller.go | 99 +++--
.../openstackcluster_controller_test.go | 6 +-
controllers/openstackmachine_controller.go | 41 +-
.../openstackmachine_controller_test.go | 2 +-
.../openstackservergroup_controller.go | 215 +++++++++++
.../openstackservergroup_controller_test.go | 362 ++++++++++++++++++
docs/book/src/api/v1beta1/api.md | 243 +++++++++++-
main.go | 10 +
pkg/clients/compute.go | 31 ++
pkg/clients/mock/compute.go | 28 ++
pkg/cloud/services/compute/servergroup.go | 18 +-
pkg/scope/mock.go | 7 +
pkg/scope/provider.go | 23 ++
pkg/scope/scope.go | 3 +-
32 files changed, 1458 insertions(+), 60 deletions(-)
create mode 100644 api/v1beta1/openstackservergroup_types.go
create mode 100644 config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservergroups.yaml
create mode 100644 config/crd/patches/cainjection_in_openstackservergroups.yaml
create mode 100644 config/crd/patches/webhook_in_openstackservergroups.yaml
create mode 100644 config/rbac/openstackservergroup_editor_role.yaml
create mode 100644 config/rbac/openstackservergroup_viewer_role.yaml
create mode 100644 controllers/openstackservergroup_controller.go
create mode 100644 controllers/openstackservergroup_controller_test.go
diff --git a/PROJECT b/PROJECT
index 9db67d8145..9616ccb04d 100644
--- a/PROJECT
+++ b/PROJECT
@@ -44,4 +44,7 @@ resources:
- group: infrastructure
kind: OpenStackFloatingIPPool
version: v1alpha1
+- group: infrastructure
+ kind: OpenStackServerGroup
+ version: v1beta1
version: "2"
diff --git a/api/v1alpha6/zz_generated.conversion.go b/api/v1alpha6/zz_generated.conversion.go
index 8f75cb6e82..e53a3fba22 100644
--- a/api/v1alpha6/zz_generated.conversion.go
+++ b/api/v1alpha6/zz_generated.conversion.go
@@ -1257,6 +1257,7 @@ func autoConvert_v1beta1_OpenStackMachineSpec_To_v1alpha6_OpenStackMachineSpec(i
}
// WARNING: in.AdditionalBlockDevices requires manual conversion: does not exist in peer-type
// WARNING: in.ServerGroup requires manual conversion: does not exist in peer-type
+ // WARNING: in.ServerGroupRef requires manual conversion: does not exist in peer-type
if in.IdentityRef != nil {
in, out := &in.IdentityRef, &out.IdentityRef
*out = new(OpenStackIdentityReference)
diff --git a/api/v1alpha7/zz_generated.conversion.go b/api/v1alpha7/zz_generated.conversion.go
index 82777d6580..4d28692e61 100644
--- a/api/v1alpha7/zz_generated.conversion.go
+++ b/api/v1alpha7/zz_generated.conversion.go
@@ -1510,6 +1510,7 @@ func autoConvert_v1beta1_OpenStackMachineSpec_To_v1alpha7_OpenStackMachineSpec(i
out.AdditionalBlockDevices = nil
}
// WARNING: in.ServerGroup requires manual conversion: does not exist in peer-type
+ // WARNING: in.ServerGroupRef requires manual conversion: does not exist in peer-type
if in.IdentityRef != nil {
in, out := &in.IdentityRef, &out.IdentityRef
*out = new(OpenStackIdentityReference)
diff --git a/api/v1beta1/openstackmachine_types.go b/api/v1beta1/openstackmachine_types.go
index 4a6e798426..35c3d35da8 100644
--- a/api/v1beta1/openstackmachine_types.go
+++ b/api/v1beta1/openstackmachine_types.go
@@ -87,6 +87,10 @@ type OpenStackMachineSpec struct {
// +optional
ServerGroup *ServerGroupParam `json:"serverGroup,omitempty"`
+ // The server group ref to assign the machine to.
+ // +optional
+ ServerGroupRef *ServerGroupRef `json:"serverGroupRef,omitempty"`
+
// IdentityRef is a reference to a secret holding OpenStack credentials
// to be used when reconciling this machine. If not specified, the
// credentials specified in the cluster will be used.
diff --git a/api/v1beta1/openstackservergroup_types.go b/api/v1beta1/openstackservergroup_types.go
new file mode 100644
index 0000000000..afd2f3f0a7
--- /dev/null
+++ b/api/v1beta1/openstackservergroup_types.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // ServerGroupFinalizer allows ReconcileOpenStackServerGroup to clean up OpenStack resources associated with OpenStackServerGroup before
+ // removing it from the apiserver.
+ ServerGroupFinalizer = "openstackservergroup.infrastructure.cluster.x-k8s.io"
+)
+
+// OpenStackServerGroupSpec defines the desired state of OpenStackServerGroup.
+type OpenStackServerGroupSpec struct {
+ // Policy is the server group policy. Valid values are affinity, anti-affinity, soft-affinity, soft-anti-affinity.
+ Policy string `json:"policy"`
+
+ // IdentityRef is a reference to an identity to be used when reconciling this resource
+ // +optional
+ IdentityRef *OpenStackIdentityReference `json:"identityRef,omitempty"`
+}
+
+// OpenStackServerGroupStatus defines the observed state of OpenStackServerGroup.
+type OpenStackServerGroupStatus struct {
+ // Ready is true when the resource is created.
+ // +optional
+ Ready bool `json:"ready"`
+
+ // UUID of provisioned ServerGroup
+ ID string `json:"uuid"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// OpenStackServerGroup is the Schema for the openstackservergroups API.
+type OpenStackServerGroup struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec OpenStackServerGroupSpec `json:"spec,omitempty"`
+ Status OpenStackServerGroupStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// OpenStackServerGroupList contains a list of OpenStackServerGroup.
+type OpenStackServerGroupList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []OpenStackServerGroup `json:"items"`
+}
+
+func init() {
+ objectTypes = append(objectTypes, &OpenStackServerGroup{}, &OpenStackServerGroupList{})
+}
diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go
index 49e654bfd3..bc3dde7520 100644
--- a/api/v1beta1/types.go
+++ b/api/v1beta1/types.go
@@ -613,6 +613,12 @@ func (f *ServerGroupFilter) IsZero() bool {
return f.Name == nil
}
+type ServerGroupRef struct {
+ // Name of the OpenStackServerGroup resource to be used.
+ // Must be in the same namespace as the resource(s) being provisioned.
+ Name string `json:"name"`
+}
+
// BlockDeviceType defines the type of block device to create.
type BlockDeviceType string
@@ -881,7 +887,7 @@ func (s *APIServerLoadBalancer) IsEnabled() bool {
// ResolvedMachineSpec contains resolved references to resources required by the machine.
type ResolvedMachineSpec struct {
- // ServerGroupID is the ID of the server group the machine should be added to and is calculated based on ServerGroupFilter.
+ // ServerGroupID is the ID of the server group the machine should be added to and is calculated based on ServerGroupFilter and ServerGroupRef.
// +optional
ServerGroupID string `json:"serverGroupID,omitempty"`
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 83b7a42e2d..b69f45daa0 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -1014,6 +1014,11 @@ func (in *OpenStackMachineSpec) DeepCopyInto(out *OpenStackMachineSpec) {
*out = new(ServerGroupParam)
(*in).DeepCopyInto(*out)
}
+ if in.ServerGroupRef != nil {
+ in, out := &in.ServerGroupRef, &out.ServerGroupRef
+ *out = new(ServerGroupRef)
+ **out = **in
+ }
if in.IdentityRef != nil {
in, out := &in.IdentityRef, &out.IdentityRef
*out = new(OpenStackIdentityReference)
@@ -1183,6 +1188,100 @@ func (in *OpenStackMachineTemplateSpec) DeepCopy() *OpenStackMachineTemplateSpec
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackServerGroup) DeepCopyInto(out *OpenStackServerGroup) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerGroup.
+func (in *OpenStackServerGroup) DeepCopy() *OpenStackServerGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackServerGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenStackServerGroup) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackServerGroupList) DeepCopyInto(out *OpenStackServerGroupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OpenStackServerGroup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerGroupList.
+func (in *OpenStackServerGroupList) DeepCopy() *OpenStackServerGroupList {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackServerGroupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OpenStackServerGroupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackServerGroupSpec) DeepCopyInto(out *OpenStackServerGroupSpec) {
+ *out = *in
+ if in.IdentityRef != nil {
+ in, out := &in.IdentityRef, &out.IdentityRef
+ *out = new(OpenStackIdentityReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerGroupSpec.
+func (in *OpenStackServerGroupSpec) DeepCopy() *OpenStackServerGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackServerGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenStackServerGroupStatus) DeepCopyInto(out *OpenStackServerGroupStatus) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackServerGroupStatus.
+func (in *OpenStackServerGroupStatus) DeepCopy() *OpenStackServerGroupStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenStackServerGroupStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortOpts) DeepCopyInto(out *PortOpts) {
*out = *in
@@ -1638,6 +1737,21 @@ func (in *ServerGroupParam) DeepCopy() *ServerGroupParam {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServerGroupRef) DeepCopyInto(out *ServerGroupRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupRef.
+func (in *ServerGroupRef) DeepCopy() *ServerGroupRef {
+ if in == nil {
+ return nil
+ }
+ out := new(ServerGroupRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerMetadata) DeepCopyInto(out *ServerMetadata) {
*out = *in
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml
index be5b113572..f0c69dbd3e 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml
@@ -4083,6 +4083,17 @@ spec:
format: uuid
type: string
type: object
+ serverGroupRef:
+ description: The server group ref to assign the machine to.
+ properties:
+ name:
+ description: |-
+ Name of the OpenStackServerGroup resource to be used.
+ Must be in the same namespace as the resource(s) being provisioned.
+ type: string
+ required:
+ - name
+ type: object
serverMetadata:
description: Metadata mapping. Allows you to create a map
of key value pairs to add to the server instance.
@@ -5077,7 +5088,8 @@ spec:
type: array
serverGroupID:
description: ServerGroupID is the ID of the server group the
- machine should be added to and is calculated based on ServerGroupFilter.
+ machine should be added to and is calculated based on ServerGroupFilter
+ and ServerGroupRef.
type: string
type: object
resources:
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml
index eb75f7731e..dcb2f6d61b 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml
@@ -2581,6 +2581,18 @@ spec:
format: uuid
type: string
type: object
+ serverGroupRef:
+ description: The server group ref to assign the machine
+ to.
+ properties:
+ name:
+ description: |-
+ Name of the OpenStackServerGroup resource to be used.
+ Must be in the same namespace as the resource(s) being provisioned.
+ type: string
+ required:
+ - name
+ type: object
serverMetadata:
description: Metadata mapping. Allows you to create
a map of key value pairs to add to the server instance.
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml
index 7bfc99e50a..9739b58503 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachines.yaml
@@ -1899,6 +1899,17 @@ spec:
format: uuid
type: string
type: object
+ serverGroupRef:
+ description: The server group ref to assign the machine to.
+ properties:
+ name:
+ description: |-
+ Name of the OpenStackServerGroup resource to be used.
+ Must be in the same namespace as the resource(s) being provisioned.
+ type: string
+ required:
+ - name
+ type: object
serverMetadata:
description: Metadata mapping. Allows you to create a map of key value
pairs to add to the server instance.
@@ -2225,7 +2236,8 @@ spec:
type: array
serverGroupID:
description: ServerGroupID is the ID of the server group the machine
- should be added to and is calculated based on ServerGroupFilter.
+ should be added to and is calculated based on ServerGroupFilter
+ and ServerGroupRef.
type: string
type: object
resources:
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml
index 611a03b6c7..c0ec967b24 100644
--- a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml
@@ -1685,6 +1685,17 @@ spec:
format: uuid
type: string
type: object
+ serverGroupRef:
+ description: The server group ref to assign the machine to.
+ properties:
+ name:
+ description: |-
+ Name of the OpenStackServerGroup resource to be used.
+ Must be in the same namespace as the resource(s) being provisioned.
+ type: string
+ required:
+ - name
+ type: object
serverMetadata:
description: Metadata mapping. Allows you to create a map
of key value pairs to add to the server instance.
diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservergroups.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservergroups.yaml
new file mode 100644
index 0000000000..d6233481b7
--- /dev/null
+++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_openstackservergroups.yaml
@@ -0,0 +1,85 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: openstackservergroups.infrastructure.cluster.x-k8s.io
+spec:
+ group: infrastructure.cluster.x-k8s.io
+ names:
+ kind: OpenStackServerGroup
+ listKind: OpenStackServerGroupList
+ plural: openstackservergroups
+ singular: openstackservergroup
+ scope: Namespaced
+ versions:
+ - name: v1beta1
+ schema:
+ openAPIV3Schema:
+ description: OpenStackServerGroup is the Schema for the openstackservergroups
+ API.
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OpenStackServerGroupSpec defines the desired state of OpenStackServerGroup.
+ properties:
+ identityRef:
+ description: IdentityRef is a reference to an identity to be used when
+ reconciling this resource
+ properties:
+ cloudName:
+ description: CloudName specifies the name of the entry in the
+ clouds.yaml file to use.
+ type: string
+ name:
+ description: |-
+ Name is the name of a secret in the same namespace as the resource being provisioned.
+ The secret must contain a key named `clouds.yaml` which contains an OpenStack clouds.yaml file.
+ The secret may optionally contain a key named `cacert` containing a PEM-encoded CA certificate.
+ type: string
+ required:
+ - cloudName
+ - name
+ type: object
+ policy:
+ description: Policy is the server group policy. Valid values are
+ affinity, anti-affinity, soft-affinity, soft-anti-affinity.
+ type: string
+ required:
+ - policy
+ type: object
+ status:
+ description: OpenStackServerGroupStatus defines the observed state of
+ OpenStackServerGroup.
+ properties:
+ ready:
+ description: Ready is true when the resource is created.
+ type: boolean
+ uuid:
+ description: UUID of provisioned ServerGroup
+ type: string
+ required:
+ - uuid
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index a4c9e050cf..25c2a21cb8 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -10,6 +10,7 @@ resources:
- bases/infrastructure.cluster.x-k8s.io_openstackmachinetemplates.yaml
- bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml
- bases/infrastructure.cluster.x-k8s.io_openstackfloatingippools.yaml
+- bases/infrastructure.cluster.x-k8s.io_openstackservergroups.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patches:
@@ -20,6 +21,7 @@ patches:
- path: patches/webhook_in_openstackmachinetemplates.yaml
- path: patches/webhook_in_openstackclustertemplates.yaml
#- patches/webhook_in_openstackfloatingippools.yaml
+#- patches/webhook_in_openstackservergroups.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/crd/patches/cainjection_in_openstackservergroups.yaml b/config/crd/patches/cainjection_in_openstackservergroups.yaml
new file mode 100644
index 0000000000..cdf5cd763c
--- /dev/null
+++ b/config/crd/patches/cainjection_in_openstackservergroups.yaml
@@ -0,0 +1,8 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+ name: openstackservergroups.infrastructure.cluster.x-k8s.io
diff --git a/config/crd/patches/webhook_in_openstackservergroups.yaml b/config/crd/patches/webhook_in_openstackservergroups.yaml
new file mode 100644
index 0000000000..83eaa4e206
--- /dev/null
+++ b/config/crd/patches/webhook_in_openstackservergroups.yaml
@@ -0,0 +1,17 @@
+# The following patch enables conversion webhook for CRD
+# CRD conversion requires k8s 1.13 or later.
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: openstackservergroups.infrastructure.cluster.x-k8s.io
+spec:
+ conversion:
+ strategy: Webhook
+ webhookClientConfig:
+ # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
+ # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
+ caBundle: Cg==
+ service:
+ namespace: system
+ name: webhook-service
+ path: /convert
diff --git a/config/rbac/openstackservergroup_editor_role.yaml b/config/rbac/openstackservergroup_editor_role.yaml
new file mode 100644
index 0000000000..9309a024dc
--- /dev/null
+++ b/config/rbac/openstackservergroup_editor_role.yaml
@@ -0,0 +1,24 @@
+# permissions for end users to edit openstackservergroups.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: openstackservergroup-editor-role
+rules:
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - openstackservergroups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - openstackservergroups/status
+ verbs:
+ - get
diff --git a/config/rbac/openstackservergroup_viewer_role.yaml b/config/rbac/openstackservergroup_viewer_role.yaml
new file mode 100644
index 0000000000..3a120eae83
--- /dev/null
+++ b/config/rbac/openstackservergroup_viewer_role.yaml
@@ -0,0 +1,20 @@
+# permissions for end users to view openstackservergroups.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: openstackservergroup-viewer-role
+rules:
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - openstackservergroups
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - openstackservergroups/status
+ verbs:
+ - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 4790f87801..9bf4912704 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -113,6 +113,26 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - openstackservergroups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - infrastructure.cluster.x-k8s.io
+ resources:
+ - openstackservergroups/status
+ verbs:
+ - get
+ - patch
+ - update
- apiGroups:
- ipam.cluster.x-k8s.io
resources:
diff --git a/controllers/openstackcluster_controller.go b/controllers/openstackcluster_controller.go
index d36a0acf9f..0192ac5b4c 100644
--- a/controllers/openstackcluster_controller.go
+++ b/controllers/openstackcluster_controller.go
@@ -124,14 +124,14 @@ func (r *OpenStackClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req
// Handle deleted clusters
if !openStackCluster.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(ctx, scope, cluster, openStackCluster)
+ return r.reconcileDelete(ctx, r.Client, scope, cluster, openStackCluster)
}
// Handle non-deleted clusters
- return reconcileNormal(scope, cluster, openStackCluster)
+ return reconcileNormal(ctx, r.Client, scope, cluster, openStackCluster)
}
-func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
+func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, ctrlClient client.Client, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) {
scope.Logger().Info("Reconciling Cluster delete")
// Wait for machines to be deleted before removing the finalizer as they
@@ -154,7 +154,7 @@ func (r *OpenStackClusterReconciler) reconcileDelete(ctx context.Context, scope
// We attempt to delete it even if no status was written, just in case
if openStackCluster.Status.Network != nil {
// Attempt to resolve bastion resources before delete. We don't need to worry about starting if the resources have changed on update.
- if _, err := resolveBastionResources(scope, clusterResourceName, openStackCluster); err != nil {
+ if _, err := resolveBastionResources(ctx, ctrlClient, scope, clusterResourceName, openStackCluster); err != nil {
return reconcile.Result{}, err
}
@@ -218,43 +218,64 @@ func contains(arr []string, target string) bool {
return false
}
-func resolveBastionResources(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
+func resolveBastionResources(ctx context.Context, ctrlClient client.Client, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster) (bool, error) {
// Resolve and store resources for the bastion
- if openStackCluster.Spec.Bastion.IsEnabled() {
- if openStackCluster.Status.Bastion == nil {
- openStackCluster.Status.Bastion = &infrav1.BastionStatus{}
- }
- if openStackCluster.Spec.Bastion.Spec == nil {
- return false, fmt.Errorf("bastion spec is nil when bastion is enabled, this shouldn't happen")
- }
- resolved := openStackCluster.Status.Bastion.Resolved
- if resolved == nil {
- resolved = &infrav1.ResolvedMachineSpec{}
- openStackCluster.Status.Bastion.Resolved = resolved
- }
- changed, err := compute.ResolveMachineSpec(scope,
- openStackCluster.Spec.Bastion.Spec, resolved,
- clusterResourceName, bastionName(clusterResourceName),
- openStackCluster, getBastionSecurityGroupID(openStackCluster))
+ if !openStackCluster.Spec.Bastion.IsEnabled() {
+ return false, nil
+ }
+ if openStackCluster.Status.Bastion == nil {
+ openStackCluster.Status.Bastion = &infrav1.BastionStatus{}
+ }
+ bastionSpec := openStackCluster.Spec.Bastion.Spec
+ if bastionSpec == nil {
+ return false, fmt.Errorf("bastion spec is nil when bastion is enabled, this shouldn't happen")
+ }
+ resolved := openStackCluster.Status.Bastion.Resolved
+ if resolved == nil {
+ resolved = &infrav1.ResolvedMachineSpec{}
+ openStackCluster.Status.Bastion.Resolved = resolved
+ }
+ // Resolve resources from OpenStack
+ changed, err := compute.ResolveMachineSpec(scope,
+ bastionSpec, resolved,
+ clusterResourceName, bastionName(clusterResourceName),
+ openStackCluster, getBastionSecurityGroupID(openStackCluster))
+ if err != nil {
+ return false, err
+ }
+
+ // Resolve the OpenStackServerGroup Reference using K8sClient (lower priority than spec.ServerGroup)
+ if resolved.ServerGroupID == "" && bastionSpec.ServerGroupRef != nil && bastionSpec.ServerGroupRef.Name != "" {
+ servergroup := &infrav1.OpenStackServerGroup{}
+
+ // Get OpenStackServerGroup resource from K8s
+ err := ctrlClient.Get(ctx, client.ObjectKey{
+ Namespace: openStackCluster.Namespace,
+ Name: bastionSpec.ServerGroupRef.Name,
+ }, servergroup)
if err != nil {
- return false, err
- }
- if changed {
- // If the resolved machine spec changed we need to restart the reconcile to avoid inconsistencies between reconciles.
- return true, nil
- }
- resources := openStackCluster.Status.Bastion.Resources
- if resources == nil {
- resources = &infrav1.MachineResources{}
- openStackCluster.Status.Bastion.Resources = resources
+ return changed, err
}
- err = compute.AdoptMachineResources(scope, resolved, resources)
- if err != nil {
- return false, err
+ // Store the resolved UUID, once it's ready and set.
+ if servergroup.Status.Ready && servergroup.Status.ID != "" {
+ resolved.ServerGroupID = servergroup.Status.ID
+ changed = true
}
}
- return false, nil
+
+ if changed {
+ // If the resolved machine spec changed we need to restart the reconcile to avoid inconsistencies between reconciles.
+ return true, nil
+ }
+ resources := openStackCluster.Status.Bastion.Resources
+ if resources == nil {
+ resources = &infrav1.MachineResources{}
+ openStackCluster.Status.Bastion.Resources = resources
+ }
+
+ err = compute.AdoptMachineResources(scope, resolved, resources)
+ return false, err
}
func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) error {
@@ -345,7 +366,7 @@ func deleteBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStac
return nil
}
-func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam
+func reconcileNormal(ctx context.Context, ctrlClient client.Client, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (ctrl.Result, error) { //nolint:unparam
scope.Logger().Info("Reconciling Cluster")
// If the OpenStackCluster doesn't have our finalizer, add it.
@@ -364,7 +385,7 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt
return reconcile.Result{}, err
}
- result, err := reconcileBastion(scope, cluster, openStackCluster)
+ result, err := reconcileBastion(ctx, ctrlClient, scope, cluster, openStackCluster)
if err != nil {
return reconcile.Result{}, err
}
@@ -399,11 +420,11 @@ func reconcileNormal(scope *scope.WithLogger, cluster *clusterv1.Cluster, openSt
return reconcile.Result{}, nil
}
-func reconcileBastion(scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) {
+func reconcileBastion(ctx context.Context, ctrlClient client.Client, scope *scope.WithLogger, cluster *clusterv1.Cluster, openStackCluster *infrav1.OpenStackCluster) (*ctrl.Result, error) {
scope.Logger().V(4).Info("Reconciling Bastion")
clusterResourceName := names.ClusterResourceName(cluster)
- changed, err := resolveBastionResources(scope, clusterResourceName, openStackCluster)
+ changed, err := resolveBastionResources(ctx, ctrlClient, scope, clusterResourceName, openStackCluster)
if err != nil {
return nil, err
}
diff --git a/controllers/openstackcluster_controller_test.go b/controllers/openstackcluster_controller_test.go
index 64f6674149..777f23e84a 100644
--- a/controllers/openstackcluster_controller_test.go
+++ b/controllers/openstackcluster_controller_test.go
@@ -286,7 +286,7 @@ var _ = Describe("OpenStackCluster controller", func() {
networkClientRecorder.ListFloatingIP(floatingips.ListOpts{PortID: "portID1"}).Return(make([]floatingips.FloatingIP, 1), nil)
- res, err := reconcileBastion(scope, capiCluster, testCluster)
+ res, err := reconcileBastion(ctx, k8sClient, scope, capiCluster, testCluster)
expectedStatus := &infrav1.BastionStatus{
ID: "adopted-bastion-uuid",
State: "ACTIVE",
@@ -368,7 +368,7 @@ var _ = Describe("OpenStackCluster controller", func() {
networkClientRecorder.ListFloatingIP(floatingips.ListOpts{PortID: "portID1"}).Return([]floatingips.FloatingIP{{FloatingIP: "1.2.3.4"}}, nil)
- res, err := reconcileBastion(scope, capiCluster, testCluster)
+ res, err := reconcileBastion(ctx, k8sClient, scope, capiCluster, testCluster)
Expect(testCluster.Status.Bastion).To(Equal(&infrav1.BastionStatus{
ID: "adopted-fip-bastion-uuid",
FloatingIP: "1.2.3.4",
@@ -445,7 +445,7 @@ var _ = Describe("OpenStackCluster controller", func() {
computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT()
computeClientRecorder.GetServer("requeue-bastion-uuid").Return(&server, nil)
- res, err := reconcileBastion(scope, capiCluster, testCluster)
+ res, err := reconcileBastion(ctx, k8sClient, scope, capiCluster, testCluster)
Expect(testCluster.Status.Bastion).To(Equal(&infrav1.BastionStatus{
ID: "requeue-bastion-uuid",
State: "BUILD",
diff --git a/controllers/openstackmachine_controller.go b/controllers/openstackmachine_controller.go
index 230ecd5342..aa7f903ad6 100644
--- a/controllers/openstackmachine_controller.go
+++ b/controllers/openstackmachine_controller.go
@@ -155,24 +155,49 @@ func (r *OpenStackMachineReconciler) Reconcile(ctx context.Context, req ctrl.Req
// Handle deleted machines
if !openStackMachine.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(scope, clusterResourceName, infraCluster, machine, openStackMachine)
+ return r.reconcileDelete(ctx, scope, clusterResourceName, infraCluster, machine, openStackMachine)
}
// Handle non-deleted clusters
return r.reconcileNormal(ctx, scope, clusterResourceName, infraCluster, machine, openStackMachine)
}
-func resolveMachineResources(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine) (bool, error) {
+func resolveMachineResources(ctx context.Context, ctrlClient client.Client, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, openStackMachine *infrav1.OpenStackMachine, machine *clusterv1.Machine) (bool, error) {
resolved := openStackMachine.Status.Resolved
if resolved == nil {
resolved = &infrav1.ResolvedMachineSpec{}
openStackMachine.Status.Resolved = resolved
}
- // Resolve and store resources
- return compute.ResolveMachineSpec(scope,
+
+ // Resolve resources from OpenStack
+ changed, err := compute.ResolveMachineSpec(scope,
&openStackMachine.Spec, resolved,
clusterResourceName, openStackMachine.Name,
openStackCluster, getManagedSecurityGroup(openStackCluster, machine))
+ if err != nil {
+ return changed, err
+ }
+
+ // Resolve the OpenStackServerGroup Reference using K8sClient (lower priority than spec.ServerGroup)
+ if resolved.ServerGroupID == "" && openStackMachine.Spec.ServerGroupRef != nil && openStackMachine.Spec.ServerGroupRef.Name != "" {
+ servergroup := &infrav1.OpenStackServerGroup{}
+
+ // Get OpenStackServerGroup resource from K8s
+ err := ctrlClient.Get(ctx, client.ObjectKey{
+ Namespace: openStackCluster.Namespace,
+ Name: openStackMachine.Spec.ServerGroupRef.Name,
+ }, servergroup)
+ if err != nil {
+ return changed, err
+ }
+
+ // Store the resolved UUID, once it's ready and set.
+ if servergroup.Status.Ready && servergroup.Status.ID != "" {
+ resolved.ServerGroupID = servergroup.Status.ID
+ changed = true
+ }
+ }
+ return changed, err
}
func adoptMachineResources(scope *scope.WithLogger, openStackMachine *infrav1.OpenStackMachine) error {
@@ -238,7 +263,7 @@ func (r *OpenStackMachineReconciler) SetupWithManager(ctx context.Context, mgr c
Complete(r)
}
-func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam
+func (r *OpenStackMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.WithLogger, clusterResourceName string, openStackCluster *infrav1.OpenStackCluster, machine *clusterv1.Machine, openStackMachine *infrav1.OpenStackMachine) (ctrl.Result, error) { //nolint:unparam
scope.Logger().Info("Reconciling Machine delete")
computeService, err := compute.NewService(scope)
@@ -276,7 +301,7 @@ func (r *OpenStackMachineReconciler) reconcileDelete(scope *scope.WithLogger, cl
// This code can and should be deleted in a future release when we are
// sure that all machines have been reconciled at least by a v0.10 or
// later controller.
- if _, err := resolveMachineResources(scope, clusterResourceName, openStackCluster, openStackMachine, machine); err != nil {
+ if _, err := resolveMachineResources(ctx, r.Client, scope, clusterResourceName, openStackCluster, openStackMachine, machine); err != nil {
// Return the error, but allow the resource to be removed anyway.
controllerutil.RemoveFinalizer(openStackMachine, infrav1.MachineFinalizer)
return ctrl.Result{}, err
@@ -552,7 +577,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
return ctrl.Result{}, nil
}
- changed, err := resolveMachineResources(scope, clusterResourceName, openStackCluster, openStackMachine, machine)
+ changed, err := resolveMachineResources(ctx, r.Client, scope, clusterResourceName, openStackCluster, openStackMachine, machine)
if err != nil {
return ctrl.Result{}, err
}
@@ -667,7 +692,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State())
return ctrl.Result{RequeueAfter: waitForBuildingInstanceToReconcile}, nil
default:
- // The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE
+ // Other states are normal (for example, migrating or shutoff), but we don't want to proceed until the instance is ACTIVE
// due to potential conflict or unexpected actions
scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State())
conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State())
diff --git a/controllers/openstackmachine_controller_test.go b/controllers/openstackmachine_controller_test.go
index a4564396e4..6b94043912 100644
--- a/controllers/openstackmachine_controller_test.go
+++ b/controllers/openstackmachine_controller_test.go
@@ -556,7 +556,7 @@ func Test_reconcileDelete(t *testing.T) {
osMachine.Name = openStackMachineName
osMachine.Finalizers = []string{infrav1.MachineFinalizer}
- _, err := reconciler.reconcileDelete(scopeWithLogger, openStackMachineName, &openStackCluster, &machine, &tt.osMachine)
+ _, err := reconciler.reconcileDelete(ctx, scopeWithLogger, openStackMachineName, &openStackCluster, &machine, &tt.osMachine)
if tt.wantErr {
g.Expect(err).To(HaveOccurred())
diff --git a/controllers/openstackservergroup_controller.go b/controllers/openstackservergroup_controller.go
new file mode 100644
index 0000000000..2b7c0a3679
--- /dev/null
+++ b/controllers/openstackservergroup_controller.go
@@ -0,0 +1,215 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "reflect"
+
+ "github.com/go-logr/logr"
+ "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ kerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/client-go/tools/record"
+ "sigs.k8s.io/cluster-api/util/annotations"
+ "sigs.k8s.io/cluster-api/util/patch"
+ "sigs.k8s.io/cluster-api/util/predicates"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/builder"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/predicate"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-openstack/pkg/cloud/services/compute"
+ "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
+ capoerrors "sigs.k8s.io/cluster-api-provider-openstack/pkg/utils/errors"
+)
+
+// OpenStackServerGroupReconciler reconciles an OpenStackServerGroup object.
+type OpenStackServerGroupReconciler struct {
+ client.Client
+ Recorder record.EventRecorder
+ WatchFilterValue string
+ ScopeFactory scope.Factory
+ CaCertificates []byte // PEM encoded ca certificates.
+}
+
+//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackservergroups,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=openstackservergroups/status,verbs=get;update;patch
+
+func (r *OpenStackServerGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, reterr error) {
+ log := ctrl.LoggerFrom(ctx)
+
+ // Fetch the OpenStackServerGroup instance
+ openStackServerGroup := &infrav1.OpenStackServerGroup{}
+ err := r.Client.Get(ctx, req.NamespacedName, openStackServerGroup)
+ if err != nil {
+ if apierrors.IsNotFound(err) {
+ return ctrl.Result{}, nil
+ }
+ return ctrl.Result{}, err
+ }
+
+ log = log.WithValues("openStackServerGroup", openStackServerGroup.Name)
+
+ log.Info("OpenStackServerGroup is about to reconcile")
+
+ if annotations.HasPaused(openStackServerGroup) {
+ log.Info("OpenStackServerGroup is marked as paused. Won't reconcile")
+ return ctrl.Result{}, nil
+ }
+
+ // Initialize the patch helper
+ patchHelper, err := patch.NewHelper(openStackServerGroup, r.Client)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
+ // Always patch the openStackServerGroup when exiting this function so we can persist any changes.
+ defer func() {
+ if err := patchServerGroup(ctx, patchHelper, openStackServerGroup); err != nil {
+ result = ctrl.Result{}
+ reterr = kerrors.NewAggregate([]error{reterr, err})
+ }
+ }()
+
+ clientScope, err := r.ScopeFactory.NewClientScopeFromServerGroup(ctx, r.Client, openStackServerGroup, r.CaCertificates, log)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+ scope := scope.NewWithLogger(clientScope, log)
+
+ // Handle deleted servergroups
+ if !openStackServerGroup.DeletionTimestamp.IsZero() {
+ return r.reconcileDelete(scope, openStackServerGroup)
+ }
+
+ // Handle non-deleted servergroups
+ return r.reconcileNormal(scope, openStackServerGroup)
+}
+
+func patchServerGroup(ctx context.Context, patchHelper *patch.Helper, openStackServerGroup *infrav1.OpenStackServerGroup, options ...patch.Option) error {
+ return patchHelper.Patch(ctx, openStackServerGroup, options...)
+}
+
+func (r *OpenStackServerGroupReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(
+ &infrav1.OpenStackServerGroup{},
+ builder.WithPredicates(
+ predicate.Funcs{
+ // Avoid reconciling if the event triggering the reconciliation is related to incremental status updates
+ UpdateFunc: func(e event.UpdateEvent) bool {
+ oldServerGroup := e.ObjectOld.(*infrav1.OpenStackServerGroup).DeepCopy()
+ newServerGroup := e.ObjectNew.(*infrav1.OpenStackServerGroup).DeepCopy()
+ oldServerGroup.Status = infrav1.OpenStackServerGroupStatus{}
+ newServerGroup.Status = infrav1.OpenStackServerGroupStatus{}
+ oldServerGroup.ObjectMeta.ResourceVersion = ""
+ newServerGroup.ObjectMeta.ResourceVersion = ""
+ return !reflect.DeepEqual(oldServerGroup, newServerGroup)
+ },
+ },
+ ),
+ ).
+ WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+ Complete(r)
+}
+
+func (r *OpenStackServerGroupReconciler) reconcileDelete(scope *scope.WithLogger, openStackServerGroup *infrav1.OpenStackServerGroup) (result ctrl.Result, reterr error) { //nolint:unparam
+ scope.Logger().Info("Reconciling OpenStackServerGroup delete")
+
+ computeService, err := compute.NewService(scope)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
+ serverGroupID := openStackServerGroup.Status.ID
+ if serverGroupID != "" {
+ err = computeService.DeleteServerGroup(serverGroupID)
+ if err != nil {
+ if capoerrors.IsNotFound(err) {
+ scope.Logger().Info("Server Group did not exist in OpenStack. Treating deletion as success")
+ } else {
+ return ctrl.Result{}, err
+ }
+ }
+ }
+
+ controllerutil.RemoveFinalizer(openStackServerGroup, infrav1.ServerGroupFinalizer)
+ scope.Logger().Info("Reconciled OpenStackServerGroup delete successfully")
+ return ctrl.Result{}, nil
+}
+
+func (r *OpenStackServerGroupReconciler) reconcileNormal(scope *scope.WithLogger, openStackServerGroup *infrav1.OpenStackServerGroup) (result ctrl.Result, reterr error) { //nolint:unparam
+ // If the OpenStackServerGroup doesn't have our finalizer, add it.
+ if controllerutil.AddFinalizer(openStackServerGroup, infrav1.ServerGroupFinalizer) {
+ scope.Logger().Info("Reconciling ServerGroup: Adding finalizer")
+ // Register the finalizer immediately to avoid orphaning OpenStack resources on delete
+ // NOTE(dalees): This return without Requeue relies on patchServerGroup to persist the change, and the watch triggers an immediate reconcile.
+ return ctrl.Result{}, nil
+ }
+
+ scope.Logger().Info("Reconciling OpenStackServerGroup")
+
+ computeService, err := compute.NewService(scope)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+
+ serverGroupStatus, err := r.getOrCreate(scope.Logger(), openStackServerGroup, computeService)
+ if err != nil || serverGroupStatus == nil {
+ return ctrl.Result{}, err
+ }
+
+ // Update the resource with any new information.
+ openStackServerGroup.Status.Ready = true
+ openStackServerGroup.Status.ID = serverGroupStatus.ID
+
+ scope.Logger().Info("Reconciled OpenStackServerGroup successfully")
+ return ctrl.Result{}, nil
+}
+
+func (r *OpenStackServerGroupReconciler) getOrCreate(logger logr.Logger, openStackServerGroup *infrav1.OpenStackServerGroup, computeService *compute.Service) (*servergroups.ServerGroup, error) {
+ serverGroupName := openStackServerGroup.Name
+
+ serverGroup, err := computeService.GetServerGroupByName(serverGroupName, false)
+ if err != nil && serverGroup != nil {
+ // More than one server group was found with the same name.
+ // We should not create another, nor should we use the first found.
+ return nil, err
+ }
+ if err == nil {
+ return serverGroup, nil
+ }
+
+ logger.Info("Unable to get OpenStackServerGroup instance, we need to create it.", "name", serverGroupName, "policy", openStackServerGroup.Spec.Policy)
+
+ err = computeService.CreateServerGroup(serverGroupName, openStackServerGroup.Spec.Policy)
+ if err != nil {
+ return nil, err
+ }
+
+ // Perform another GET after creation; Gophercloud doesn't give us the created ServerGroup ID on create
+ serverGroup, err = computeService.GetServerGroupByName(serverGroupName, false)
+ if err != nil {
+ return nil, err
+ }
+
+ return serverGroup, err
+}
diff --git a/controllers/openstackservergroup_controller_test.go b/controllers/openstackservergroup_controller_test.go
new file mode 100644
index 0000000000..5985588f9a
--- /dev/null
+++ b/controllers/openstackservergroup_controller_test.go
@@ -0,0 +1,362 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups"
+ . "github.com/onsi/ginkgo/v2" //nolint:revive
+ . "github.com/onsi/gomega" //nolint:revive
+ "go.uber.org/mock/gomock"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+ "sigs.k8s.io/cluster-api/test/framework"
+ "sigs.k8s.io/cluster-api/util/annotations"
+ "sigs.k8s.io/cluster-api/util/patch"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ infrav1 "sigs.k8s.io/cluster-api-provider-openstack/api/v1beta1"
+ "sigs.k8s.io/cluster-api-provider-openstack/pkg/scope"
+)
+
+var (
+ reconcilerServerGroup *OpenStackServerGroupReconciler
+ testServerGroup *infrav1.OpenStackServerGroup
+)
+
+var _ = Describe("OpenStackServerGroup controller", func() {
+ testServerGroupName := "test-servergroup"
+ testNum := 0
+
+ BeforeEach(func() {
+ ctx = context.TODO()
+ testNum++
+ testNamespace = fmt.Sprintf("testservergroup-%d", testNum)
+
+ // Create a standard ServerGroup definition for all tests
+ testServerGroup = &infrav1.OpenStackServerGroup{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: infrav1.GroupVersion.Group + "/" + infrav1.GroupVersion.Version,
+ Kind: "OpenStackServerGroup",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: testServerGroupName,
+ Namespace: testNamespace,
+ },
+ Spec: infrav1.OpenStackServerGroupSpec{},
+ Status: infrav1.OpenStackServerGroupStatus{},
+ }
+ // Set finalizers so that, by default, the first reconcile does not need to add them.
+ testServerGroup.SetFinalizers([]string{infrav1.ServerGroupFinalizer})
+
+ input := framework.CreateNamespaceInput{
+ Creator: k8sClient,
+ Name: testNamespace,
+ }
+ framework.CreateNamespace(ctx, input)
+
+ mockCtrl = gomock.NewController(GinkgoT())
+ mockScopeFactory = scope.NewMockScopeFactory(mockCtrl, "")
+ reconcilerServerGroup = func() *OpenStackServerGroupReconciler {
+ return &OpenStackServerGroupReconciler{
+ Client: k8sClient,
+ ScopeFactory: mockScopeFactory,
+ }
+ }()
+ })
+
+ AfterEach(func() {
+ orphan := metav1.DeletePropagationOrphan
+ deleteOptions := client.DeleteOptions{
+ PropagationPolicy: &orphan,
+ }
+
+ // Delete OpenStackServerGroup
+ patchHelper, err := patch.NewHelper(testServerGroup, k8sClient)
+ Expect(err).To(BeNil())
+ err = patchHelper.Patch(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ err = k8sClient.Delete(ctx, testServerGroup, &deleteOptions)
+ Expect(err).To(BeNil())
+ input := framework.DeleteNamespaceInput{
+ Deleter: k8sClient,
+ Name: testNamespace,
+ }
+ framework.DeleteNamespace(ctx, input)
+ })
+
+ It("should do nothing when servergroup resource is paused", func() {
+ testServerGroup.SetName("paused")
+ annotations.AddAnnotations(testServerGroup, map[string]string{clusterv1.PausedAnnotation: "true"})
+ testServerGroup.SetFinalizers([]string{})
+
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ req := createRequestFromServerGroup(testServerGroup)
+
+ result, err := reconcilerServerGroup.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // Ensure Finalizer was not set by paused reconcile
+ err = k8sClient.Get(ctx, req.NamespacedName, testServerGroup)
+ Expect(err).To(BeNil())
+ Expect(testServerGroup.GetFinalizers()).To(BeNil())
+ })
+
+ It("should do nothing when unable to get OS client", func() {
+ testServerGroup.SetName("no-openstack-client")
+
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ req := createRequestFromServerGroup(testServerGroup)
+
+ clientCreateErr := fmt.Errorf("Test failure")
+ mockScopeFactory.SetClientScopeCreateError(clientCreateErr)
+
+ result, err := reconcilerServerGroup.Reconcile(ctx, req)
+ Expect(err).To(MatchError(clientCreateErr))
+ Expect(result).To(Equal(reconcile.Result{}))
+ })
+
+ It("should add a finalizer on the first reconcile", func() {
+ testServerGroup.SetName("finalizer-add")
+ testServerGroup.SetFinalizers([]string{})
+
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // Reconcile our resource and make sure the finalizer was set
+ req := createRequestFromServerGroup(testServerGroup)
+ result, err := reconcilerServerGroup.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // Retrieve the server group from K8s client
+ err = k8sClient.Get(ctx, req.NamespacedName, testServerGroup)
+ Expect(err).To(BeNil())
+
+ Expect(testServerGroup.GetFinalizers()).To(Equal([]string{infrav1.ServerGroupFinalizer}))
+ })
+
+ It("should adopt an existing servergroup by name if status has no uuid set", func() {
+ testServerGroup.SetName("adopt-existing-servergroup")
+
+ // Set up servergroup spec, and status with no uuid
+ testServerGroup.Spec = infrav1.OpenStackServerGroupSpec{
+ Policy: "anti-affinity",
+ }
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ testServerGroup.Status = infrav1.OpenStackServerGroupStatus{
+ ID: "",
+ Ready: false,
+ }
+ // Write the test resource to k8s client
+ err = k8sClient.Status().Update(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // Define and record the existing resource the reconcile will see.
+ servergroups := []servergroups.ServerGroup{
+ {
+ Name: "adopt-existing-servergroup",
+ ID: "adopted-servergroup-uuid",
+ Policies: []string{"anti-affinity"},
+ },
+ }
+ computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT()
+ computeClientRecorder.ListServerGroups().Return(servergroups, nil)
+
+ // Reconcile our resource, and make sure it adopted the expected resource.
+ req := createRequestFromServerGroup(testServerGroup)
+ result, err := reconcilerServerGroup.Reconcile(ctx, req)
+ Expect(err).To(BeNil())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // Retrieve the server group from K8s client
+ err = k8sClient.Get(ctx, req.NamespacedName, testServerGroup)
+ Expect(err).To(BeNil())
+
+ Expect(testServerGroup.Status.ID).To(Equal("adopted-servergroup-uuid"))
+ Expect(testServerGroup.Status.Ready).To(BeTrue())
+ })
+
+ It("should requeue and not adopt if multiple servergroups exist with the same name", func() {
+ testServerGroup.SetName("multiple-named-servergroup")
+
+ // Set up servergroup spec, and status with no uuid
+ testServerGroup.Spec = infrav1.OpenStackServerGroupSpec{
+ Policy: "anti-affinity",
+ }
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ testServerGroup.Status = infrav1.OpenStackServerGroupStatus{
+ ID: "",
+ Ready: false,
+ }
+ // Write the test resource to k8s client
+ err = k8sClient.Status().Update(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // Define and record the existing resources the reconcile will see.
+ servergroups := []servergroups.ServerGroup{
+ {
+ Name: "multiple-named-servergroup",
+ ID: "multiple-named-servergroup-uuid1",
+ Policies: []string{"anti-affinity"},
+ },
+ {
+ Name: "multiple-named-servergroup",
+ ID: "multiple-named-servergroup-uuid2",
+ Policies: []string{"soft-anti-affinity"},
+ },
+ }
+ computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT()
+ computeClientRecorder.ListServerGroups().Return(servergroups, nil)
+
+ // Reconcile our resource, it should return an error
+ req := createRequestFromServerGroup(testServerGroup)
+ result, err := reconcilerServerGroup.Reconcile(ctx, req)
+ Expect(err).NotTo(BeNil())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // Retrieve the server group from K8s client
+ err = k8sClient.Get(ctx, req.NamespacedName, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // No ServerGroup should have been adopted
+ Expect(testServerGroup.Status.ID).To(Equal(""))
+ Expect(testServerGroup.Status.Ready).To(BeFalse())
+ })
+
+ It("should succeed reconcile delete even with no servergroup adopted", func() {
+ testServerGroup.SetName("delete-existing-servergroup-no-uuid")
+
+ // Set up servergroup spec, and status with no uuid.
+ testServerGroup.Spec = infrav1.OpenStackServerGroupSpec{
+ Policy: "anti-affinity",
+ }
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ testServerGroup.Status = infrav1.OpenStackServerGroupStatus{
+ ID: "",
+ Ready: false,
+ }
+ // Write the test resource to k8s client
+ err = k8sClient.Status().Update(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // Reconcile delete; with no server group adopted there is nothing to delete in OpenStack.
+ log := GinkgoLogr
+ clientScope, err := mockScopeFactory.NewClientScopeFromServerGroup(ctx, k8sClient, testServerGroup, nil, log)
+ Expect(err).To(BeNil())
+ scope := scope.NewWithLogger(clientScope, log)
+
+ result, err := reconcilerServerGroup.reconcileDelete(scope, testServerGroup)
+ Expect(err).To(BeNil())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // Finalizer should now be removed.
+ Expect(testServerGroup.GetFinalizers()).To(Equal([]string{}))
+ })
+
+ It("should succeed reconcile delete even if the servergroup does not exist", func() {
+ testServerGroup.SetName("delete-servergroup-not-exist")
+
+ // Set up servergroup spec, and status with no uuid.
+ testServerGroup.Spec = infrav1.OpenStackServerGroupSpec{
+ Policy: "anti-affinity",
+ }
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ testServerGroup.Status = infrav1.OpenStackServerGroupStatus{
+ ID: "1234",
+ Ready: true,
+ }
+ // Write the test resource to k8s client
+ err = k8sClient.Status().Update(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // Define and record the function calls the reconcile will see.
+ computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT()
+ computeClientRecorder.DeleteServerGroup("1234").Return(gophercloud.ErrResourceNotFound{})
+
+ // Reconcile delete; the OpenStack delete returns not-found, which should be tolerated.
+ log := GinkgoLogr
+ clientScope, err := mockScopeFactory.NewClientScopeFromServerGroup(ctx, k8sClient, testServerGroup, nil, log)
+ Expect(err).To(BeNil())
+ scope := scope.NewWithLogger(clientScope, log)
+
+ result, err := reconcilerServerGroup.reconcileDelete(scope, testServerGroup)
+ Expect(err).To(BeNil())
+ Expect(result).To(Equal(reconcile.Result{}))
+
+ // Finalizer should now be removed.
+ Expect(testServerGroup.GetFinalizers()).To(Equal([]string{}))
+ })
+
+ It("should requeue if the service returns temporary errors", func() {
+ testServerGroup.SetName("requeue-on-openstack-error")
+
+ // Set up servergroup spec
+ testServerGroup.Spec = infrav1.OpenStackServerGroupSpec{
+ Policy: "anti-affinity",
+ }
+ err := k8sClient.Create(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+ testServerGroup.Status = infrav1.OpenStackServerGroupStatus{
+ ID: "",
+ Ready: false,
+ }
+ // Write the test resource to k8s client
+ err = k8sClient.Status().Update(ctx, testServerGroup)
+ Expect(err).To(BeNil())
+
+ // Define and record the existing resource the reconcile will see.
+ servergroups := []servergroups.ServerGroup{}
+ computeClientRecorder := mockScopeFactory.ComputeClient.EXPECT()
+ computeClientRecorder.ListServerGroups().Return(servergroups, nil)
+ expectedError := gophercloud.ErrDefault500{}
+ computeClientRecorder.CreateServerGroup("requeue-on-openstack-error", "anti-affinity").Return(expectedError)
+
+ // Reconcile our resource; the OpenStack create fails, so the error should surface.
+ log := GinkgoLogr
+ clientScope, err := mockScopeFactory.NewClientScopeFromServerGroup(ctx, k8sClient, testServerGroup, nil, log)
+ Expect(err).To(BeNil())
+ scope := scope.NewWithLogger(clientScope, log)
+
+ result, err := reconcilerServerGroup.reconcileNormal(scope, testServerGroup)
+ // Expect the error to surface with an empty result.
+ // Due to the error, the reconcile will be retried with default backoff by controller-runtime.
+ Expect(err).To(Equal(expectedError))
+ Expect(result).To(Equal(reconcile.Result{}))
+ })
+})
+
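+// createRequestFromServerGroup builds a reconcile.Request for the given OpenStackServerGroup's namespaced name.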
+func createRequestFromServerGroup(openStackServerGroup *infrav1.OpenStackServerGroup) reconcile.Request {
+ return reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: openStackServerGroup.GetName(),
+ Namespace: openStackServerGroup.GetNamespace(),
+ },
+ }
+}
diff --git a/docs/book/src/api/v1beta1/api.md b/docs/book/src/api/v1beta1/api.md
index dce894ec0f..2c01395a1a 100644
--- a/docs/book/src/api/v1beta1/api.md
+++ b/docs/book/src/api/v1beta1/api.md
@@ -716,6 +716,20 @@ ServerGroupParam
+serverGroupRef (ServerGroupRef, optional)
+The server group ref to assign the machine to.
identityRef
@@ -3113,7 +3127,8 @@ OpenStackClusterTemplateResource
(Appears on:
OpenStackClusterSpec,
-OpenStackMachineSpec)
+OpenStackMachineSpec,
+OpenStackServerGroupSpec)
OpenStackIdentityReference is a reference to an infrastructure
@@ -3338,6 +3353,20 @@ ServerGroupParam
+serverGroupRef (ServerGroupRef, optional)
+The server group ref to assign the machine to.
identityRef
@@ -3711,6 +3740,20 @@ ServerGroupParam
+serverGroupRef (ServerGroupRef, optional)
+The server group ref to assign the machine to.
identityRef
@@ -3775,6 +3818,172 @@ OpenStackMachineTemplateResource
+OpenStackServerGroup
+
+OpenStackServerGroup is the Schema for the openstackservergroups API.
+
+metadata (Kubernetes meta/v1.ObjectMeta)
+Refer to the Kubernetes API documentation for the fields of the metadata field.
+
+spec (OpenStackServerGroupSpec)
+
+policy (string)
+Policy is the scheduling policy for the server group. Valid values are affinity, anti-affinity, soft-affinity, and soft-anti-affinity.
+
+identityRef (OpenStackIdentityReference, optional)
+IdentityRef is a reference to an identity to be used when reconciling this resource.
+
+status (OpenStackServerGroupStatus)
+
+OpenStackServerGroupSpec
+
+(Appears on: OpenStackServerGroup)
+
+OpenStackServerGroupSpec defines the desired state of OpenStackServerGroup.
+
+policy (string)
+Policy is the scheduling policy for the server group. Valid values are affinity, anti-affinity, soft-affinity, and soft-anti-affinity.
+
+identityRef (OpenStackIdentityReference, optional)
+IdentityRef is a reference to an identity to be used when reconciling this resource.
+
+OpenStackServerGroupStatus
+
+(Appears on: OpenStackServerGroup)
+
+OpenStackServerGroupStatus defines the observed state of OpenStackServerGroup.
+
+ready (bool, optional)
+Ready is true when the resource is created.
+
+uuid (string)
+UUID of the provisioned ServerGroup.
PortOpts
@@ -4004,7 +4213,7 @@ string
(Optional)
- ServerGroupID is the ID of the server group the machine should be added to and is calculated based on ServerGroupFilter.
+ServerGroupID is the ID of the server group the machine should be added to and is calculated based on ServerGroupFilter and ServerGroupRef.
@@ -4900,6 +5109,36 @@ ServerGroupFilter
+ServerGroupRef
+
+(Appears on: OpenStackMachineSpec)
+
+name (string)
+Name of the OpenStackServerGroup resource to be used.
+Must be in the same namespace as the resource(s) being provisioned.
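A minimal sketch of how the documented fields fit together, assuming the infrastructure.cluster.x-k8s.io/v1beta1 group/version used throughout these docs; the resource names are illustrative and the OpenStackMachine spec is abbreviated to the relevant field:

apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: OpenStackServerGroup
metadata:
  name: worker-servergroup
spec:
  policy: anti-affinity
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: OpenStackMachine
metadata:
  name: worker-0
spec:
  serverGroupRef:
    name: worker-servergroup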
diff --git a/main.go b/main.go
index 76329967b3..2f21e74046 100644
--- a/main.go
+++ b/main.go
@@ -350,6 +350,16 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, caCerts []byte, sco
setupLog.Error(err, "unable to create controller", "controller", "FloatingIPPool")
os.Exit(1)
}
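+ // Register the OpenStackServerGroup controller with the manager.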
+ if err := (&controllers.OpenStackServerGroupReconciler{
+ Client: mgr.GetClient(),
+ Recorder: mgr.GetEventRecorderFor("servergroup-controller"),
+ WatchFilterValue: watchFilterValue,
+ ScopeFactory: scopeFactory,
+ CaCertificates: caCerts,
+ }).SetupWithManager(ctx, mgr); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "OpenStackServerGroup")
+ os.Exit(1)
+ }
}
func setupWebhooks(mgr ctrl.Manager) {
diff --git a/pkg/clients/compute.go b/pkg/clients/compute.go
index 64ae059fc7..8141ce7381 100644
--- a/pkg/clients/compute.go
+++ b/pkg/clients/compute.go
@@ -63,6 +63,8 @@ type ComputeClient interface {
DeleteAttachedInterface(serverID, portID string) error
ListServerGroups() ([]servergroups.ServerGroup, error)
+ CreateServerGroup(name string, policy string) error
+ DeleteServerGroup(id string) error
}
type computeClient struct{ client *gophercloud.ServiceClient }
@@ -162,6 +164,27 @@ func (c computeClient) ListServerGroups() ([]servergroups.ServerGroup, error) {
return servergroups.ExtractServerGroups(allPages)
}
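+
+// CreateServerGroup creates an OpenStack server group with the given name and a single scheduling policy.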
+func (c computeClient) CreateServerGroup(name string, policy string) error {
+ var servergroup servergroups.ServerGroup
+ mc := metrics.NewMetricPrometheusContext("server_group", "create")
+ // Use microversion <2.64 policies format.
+ opts := servergroups.CreateOpts{
+ Name: name,
+ Policies: []string{policy},
+ }
+ err := servergroups.Create(c.client, opts).ExtractInto(&servergroup)
+ return mc.ObserveRequest(err)
+}
+
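+// DeleteServerGroup deletes the server group with the given ID.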
+func (c computeClient) DeleteServerGroup(id string) error {
+ mc := metrics.NewMetricPrometheusContext("server_group", "delete")
+ err := servergroups.Delete(c.client, id).ExtractErr()
+ return mc.ObserveRequestIgnoreNotFoundorConflict(err)
+}
+
type computeErrorClient struct{ error }
// NewComputeErrorClient returns a ComputeClient in which every method returns the given error.
@@ -204,3 +227,11 @@ func (e computeErrorClient) DeleteAttachedInterface(_, _ string) error {
func (e computeErrorClient) ListServerGroups() ([]servergroups.ServerGroup, error) {
return nil, e.error
}
+
+func (e computeErrorClient) CreateServerGroup(_ string, _ string) error {
+ return e.error
+}
+
+func (e computeErrorClient) DeleteServerGroup(_ string) error {
+ return e.error
+}
diff --git a/pkg/clients/mock/compute.go b/pkg/clients/mock/compute.go
index 96a1514b3d..3a9b217493 100644
--- a/pkg/clients/mock/compute.go
+++ b/pkg/clients/mock/compute.go
@@ -75,6 +75,20 @@ func (mr *MockComputeClientMockRecorder) CreateServer(arg0 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServer", reflect.TypeOf((*MockComputeClient)(nil).CreateServer), arg0)
}
+// CreateServerGroup mocks base method.
+func (m *MockComputeClient) CreateServerGroup(arg0, arg1 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CreateServerGroup", arg0, arg1)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CreateServerGroup indicates an expected call of CreateServerGroup.
+func (mr *MockComputeClientMockRecorder) CreateServerGroup(arg0, arg1 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServerGroup", reflect.TypeOf((*MockComputeClient)(nil).CreateServerGroup), arg0, arg1)
+}
+
// DeleteAttachedInterface mocks base method.
func (m *MockComputeClient) DeleteAttachedInterface(arg0, arg1 string) error {
m.ctrl.T.Helper()
@@ -103,6 +117,20 @@ func (mr *MockComputeClientMockRecorder) DeleteServer(arg0 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServer", reflect.TypeOf((*MockComputeClient)(nil).DeleteServer), arg0)
}
+// DeleteServerGroup mocks base method.
+func (m *MockComputeClient) DeleteServerGroup(arg0 string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "DeleteServerGroup", arg0)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// DeleteServerGroup indicates an expected call of DeleteServerGroup.
+func (mr *MockComputeClientMockRecorder) DeleteServerGroup(arg0 any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServerGroup", reflect.TypeOf((*MockComputeClient)(nil).DeleteServerGroup), arg0)
+}
+
// GetFlavorFromName mocks base method.
func (m *MockComputeClient) GetFlavorFromName(arg0 string) (*flavors.Flavor, error) {
m.ctrl.T.Helper()
diff --git a/pkg/cloud/services/compute/servergroup.go b/pkg/cloud/services/compute/servergroup.go
index e36b0edc51..07309ad26e 100644
--- a/pkg/cloud/services/compute/servergroup.go
+++ b/pkg/cloud/services/compute/servergroup.go
@@ -38,7 +38,7 @@ func (s *Service) GetServerGroupID(serverGroupParam *infrav1.ServerGroupParam) (
}
// otherwise fallback to looking up by name, which is slower
- serverGroup, err := s.getServerGroupByName(*serverGroupParam.Filter.Name)
+ serverGroup, err := s.GetServerGroupByName(*serverGroupParam.Filter.Name, false)
if err != nil {
return "", err
}
@@ -46,7 +46,8 @@ func (s *Service) GetServerGroupID(serverGroupParam *infrav1.ServerGroupParam) (
return serverGroup.ID, nil
}
-func (s *Service) getServerGroupByName(serverGroupName string) (*servergroups.ServerGroup, error) {
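+// GetServerGroupByName returns the server group with the given name. When ignoreNotFound is true, a missing
+// group yields (nil, nil) rather than an error; if multiple groups share the name, the first match is returned along with an error.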
+// TODO(dalees): This second parameter is messy. It would be better to differentiate 404, 'too many' and 'actual failure' in the caller.
+func (s *Service) GetServerGroupByName(serverGroupName string, ignoreNotFound bool) (*servergroups.ServerGroup, error) {
allServerGroups, err := s.getComputeClient().ListServerGroups()
if err != nil {
return nil, err
@@ -62,11 +63,22 @@ func (s *Service) getServerGroupByName(serverGroupName string) (*servergroups.Se
switch len(serverGroups) {
case 0:
+ if ignoreNotFound {
+ return nil, nil
+ }
return nil, fmt.Errorf("no server group with name %s could be found", serverGroupName)
case 1:
return &serverGroups[0], nil
default:
// this will never happen due to duplicate IDs, only duplicate names, so our error message is worded accordingly
- return nil, fmt.Errorf("too many server groups with name %s were found", serverGroupName)
+ return &serverGroups[0], fmt.Errorf("too many server groups with name %s were found", serverGroupName)
}
}
+
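+// CreateServerGroup creates a server group with the given name and policy using the compute client.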
+func (s *Service) CreateServerGroup(serverGroupName string, policy string) error {
+ return s.getComputeClient().CreateServerGroup(serverGroupName, policy)
+}
+
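+// DeleteServerGroup deletes the server group with the given ID using the compute client.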
+func (s *Service) DeleteServerGroup(id string) error {
+ return s.getComputeClient().DeleteServerGroup(id)
+}
diff --git a/pkg/scope/mock.go b/pkg/scope/mock.go
index 759ee5220d..20d14e1176 100644
--- a/pkg/scope/mock.go
+++ b/pkg/scope/mock.go
@@ -86,6 +86,13 @@ func (f *MockScopeFactory) NewClientScopeFromFloatingIPPool(_ context.Context, _
return f, nil
}
+func (f *MockScopeFactory) NewClientScopeFromServerGroup(_ context.Context, _ client.Client, _ *infrav1.OpenStackServerGroup, _ []byte, _ logr.Logger) (Scope, error) {
+ if f.clientScopeCreateError != nil {
+ return nil, f.clientScopeCreateError
+ }
+ return f, nil
+}
+
func (f *MockScopeFactory) NewComputeClient() (clients.ComputeClient, error) {
return f.ComputeClient, nil
}
diff --git a/pkg/scope/provider.go b/pkg/scope/provider.go
index ff46916949..fa37bbce70 100644
--- a/pkg/scope/provider.go
+++ b/pkg/scope/provider.go
@@ -122,6 +122,29 @@ func (f *providerScopeFactory) NewClientScopeFromFloatingIPPool(ctx context.Cont
return NewCachedProviderScope(f.clientCache, cloud, caCert, logger)
}
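+
+// NewClientScopeFromServerGroup returns a Scope using the credentials referenced by the OpenStackServerGroup's
+// identityRef, falling back to the provided default CA certificate when the secret does not supply one.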
+func (f *providerScopeFactory) NewClientScopeFromServerGroup(ctx context.Context, ctrlClient client.Client, openStackServerGroup *infrav1.OpenStackServerGroup, defaultCACert []byte, logger logr.Logger) (Scope, error) {
+ var cloud clientconfig.Cloud
+ var caCert []byte
+
+ if openStackServerGroup.Spec.IdentityRef != nil {
+ var err error
+ cloud, caCert, err = getCloudFromSecret(ctx, ctrlClient, openStackServerGroup.Namespace, openStackServerGroup.Spec.IdentityRef.Name, openStackServerGroup.Spec.IdentityRef.CloudName)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if caCert == nil {
+ caCert = defaultCACert
+ }
+
+ if f.clientCache == nil {
+ return NewProviderScope(cloud, caCert, logger)
+ }
+
+ return NewCachedProviderScope(f.clientCache, cloud, caCert, logger)
+}
+
func getScopeCacheKey(cloud clientconfig.Cloud) (string, error) {
key, err := hash.ComputeSpewHash(cloud)
if err != nil {
diff --git a/pkg/scope/scope.go b/pkg/scope/scope.go
index 34f6d28880..3931852e12 100644
--- a/pkg/scope/scope.go
+++ b/pkg/scope/scope.go
@@ -40,11 +40,12 @@ func NewFactory(maxCacheSize int) Factory {
}
}
-// Factory instantiates a new Scope using credentials from either a cluster or a machine.
+// Factory instantiates a new Scope using credentials from the provided resources.
type Factory interface {
NewClientScopeFromMachine(ctx context.Context, ctrlClient client.Client, openStackMachine *infrav1.OpenStackMachine, openStackCluster *infrav1.OpenStackCluster, defaultCACert []byte, logger logr.Logger) (Scope, error)
NewClientScopeFromCluster(ctx context.Context, ctrlClient client.Client, openStackCluster *infrav1.OpenStackCluster, defaultCACert []byte, logger logr.Logger) (Scope, error)
NewClientScopeFromFloatingIPPool(ctx context.Context, ctrlClient client.Client, openStackCluster *infrav1alpha1.OpenStackFloatingIPPool, defaultCACert []byte, logger logr.Logger) (Scope, error)
+ NewClientScopeFromServerGroup(ctx context.Context, ctrlClient client.Client, openStackServerGroup *infrav1.OpenStackServerGroup, defaultCACert []byte, logger logr.Logger) (Scope, error)
}
// Scope contains arguments common to most operations.