diff --git a/api/v1alpha1/ionoscloudloadbalancer_types.go b/api/v1alpha1/ionoscloudloadbalancer_types.go index 1c600f35..70f83c5f 100644 --- a/api/v1alpha1/ionoscloudloadbalancer_types.go +++ b/api/v1alpha1/ionoscloudloadbalancer_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1alpha1 import ( + "strconv" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -54,10 +56,6 @@ type LoadBalancerSource struct { // NLB is used for setting up a network load balancer. //+optional NLB *NLBSpec `json:"nlb,omitempty"` - - // KubeVIP is used for setting up a highly available control plane. - //+optional - KubeVIP *KubeVIPSpec `json:"kubeVIP,omitempty"` } // NLBSpec defines the spec for a network load balancer. @@ -67,14 +65,18 @@ type NLBSpec struct { //+kubebuilder:validation:Format=uuid //+required DatacenterID string `json:"datacenterID"` -} -// KubeVIPSpec defines the spec for a high availability load balancer. -type KubeVIPSpec struct { - // Image is the container image to use for the KubeVIP static pod. - // If not provided, the default image will be used. + // Algorithm is the load balancing algorithm. + //+kubebuilder:validation:Enum=ROUND_ROBIN;LEAST_CONNECTION;RANDOM;SOURCE_IP + //+kubebuilder:default=ROUND_ROBIN //+optional - Image string `json:"image,omitempty"` + Algorithm string `json:"algorithm,omitempty"` + + // Protocol is the load balancing protocol. + //+kubebuilder:validation:Enum=TCP;HTTP + //+kubebuilder:default=TCP + //+optional + Protocol string `json:"protocol,omitempty"` } // IonosCloudLoadBalancerStatus defines the observed state of IonosCloudLoadBalancer. @@ -91,6 +93,22 @@ type IonosCloudLoadBalancerStatus struct { // cloud resource that is being provisioned. //+optional CurrentRequest *ProvisioningRequest `json:"currentRequest,omitempty"` + + // NLBStatus defines the status for a network load balancer. + //+optional + NLBStatus *NLBStatus `json:"nlbStatus,omitempty"` +} + +// NLBStatus holds information about the NLB configuration of the load balancer. +type NLBStatus struct { + // ID is the ID of the network load balancer. + ID string `json:"id,omitempty"` + + // PublicLANID is the ID of the LAN used for incoming traffic. + PublicLANID int32 `json:"publicLANID,omitempty"` + + // PrivateLANID is the ID of the LAN used for outgoing traffic. + PrivateLANID int32 `json:"privateLANID,omitempty"` } // +kubebuilder:object:root=true @@ -125,6 +143,86 @@ func (l *IonosCloudLoadBalancer) SetConditions(conditions clusterv1.Conditions) l.Status.Conditions = conditions } +// SetCurrentRequest sets the current provisioning request. +func (l *IonosCloudLoadBalancer) SetCurrentRequest(method, status, requestPath string) { + l.Status.CurrentRequest = &ProvisioningRequest{ + Method: method, + State: status, + RequestPath: requestPath, + } +} + +// DeleteCurrentRequest deletes the current provisioning request. +func (l *IonosCloudLoadBalancer) DeleteCurrentRequest() { + l.Status.CurrentRequest = nil +} + +// GetNLBID returns the NLB ID from the status. +func (l *IonosCloudLoadBalancer) GetNLBID() string { + if l.Status.NLBStatus == nil { + return "" + } + + return l.Status.NLBStatus.ID +} + +// SetNLBID sets the NLB ID in the status. +func (l *IonosCloudLoadBalancer) SetNLBID(nlbID string) { + if l.Status.NLBStatus == nil { + l.Status.NLBStatus = &NLBStatus{} + } + + l.Status.NLBStatus.ID = nlbID +} + +// SetPublicLANID sets the public LAN ID in the status. 
+func (l *IonosCloudLoadBalancer) SetPublicLANID(id string) error { + if l.Status.NLBStatus == nil { + l.Status.NLBStatus = &NLBStatus{} + } + lanID, err := strconv.ParseInt(id, 10, 32) + if err != nil { + return err + } + + l.Status.NLBStatus.PublicLANID = int32(lanID) + return nil +} + +// GetPublicLANID returns the public LAN ID from the status. +func (l *IonosCloudLoadBalancer) GetPublicLANID() string { + if l.Status.NLBStatus == nil { + return "" + } + + return strconv.Itoa(int(l.Status.NLBStatus.PublicLANID)) +} + +// SetPrivateLANID sets the private LAN ID in the status. +func (l *IonosCloudLoadBalancer) SetPrivateLANID(id string) error { + if l.Status.NLBStatus == nil { + l.Status.NLBStatus = &NLBStatus{} + } + + lanID, err := strconv.ParseInt(id, 10, 32) + if err != nil { + return err + } + + l.Status.NLBStatus.PrivateLANID = int32(lanID) + + return nil +} + +// GetPrivateLANID returns the private LAN ID from the status. +func (l *IonosCloudLoadBalancer) GetPrivateLANID() string { + if l.Status.NLBStatus == nil { + return "" + } + + return strconv.Itoa(int(l.Status.NLBStatus.PrivateLANID)) +} + func init() { - objectTypes = append(objectTypes, &IonosCloudLoadBalancer{}) + objectTypes = append(objectTypes, &IonosCloudLoadBalancer{}, &IonosCloudLoadBalancerList{}) } diff --git a/api/v1alpha1/ionoscloudloadbalancer_types_test.go b/api/v1alpha1/ionoscloudloadbalancer_types_test.go index ca6db854..d698263b 100644 --- a/api/v1alpha1/ionoscloudloadbalancer_types_test.go +++ b/api/v1alpha1/ionoscloudloadbalancer_types_test.go @@ -56,17 +56,6 @@ var _ = Describe("IonosCloudLoadBalancer", func() { }) Context("Create", func() { - When("Using a KubeVIP load balancer", func() { - It("Should succeed when no image is provided", func() { - dlb := defaultLoadBalancer(LoadBalancerSource{KubeVIP: &KubeVIPSpec{}}) - Expect(k8sClient.Create(context.Background(), dlb)).To(Succeed()) - }) - It("Should succeed with an endpoint and a port", func() { - dlb := defaultLoadBalancer(LoadBalancerSource{KubeVIP: &KubeVIPSpec{}}) - dlb.Spec.LoadBalancerEndpoint = exampleEndpoint - Expect(k8sClient.Create(context.Background(), dlb)).To(Succeed()) - }) - }) When("Using an NLB", func() { It("Should fail when not providing a datacenter ID", func() { dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{}}) @@ -80,6 +69,24 @@ var _ = Describe("IonosCloudLoadBalancer", func() { dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: exampleDatacenterID}}) Expect(k8sClient.Create(context.Background(), dlb)).To(Succeed()) }) + It("Should have ROUND_ROBIN as the default algorithm", func() { + dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: exampleDatacenterID}}) + Expect(k8sClient.Create(context.Background(), dlb)).To(Succeed()) + Expect(dlb.Spec.NLB.Algorithm).To(Equal("ROUND_ROBIN")) + }) + It("Should fail when providing an invalid algorithm", func() { + dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: exampleDatacenterID, Algorithm: "INVALID"}}) + Expect(k8sClient.Create(context.Background(), dlb)).NotTo(Succeed()) + }) + It("Should have TCP as the default protocol", func() { + dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: exampleDatacenterID}}) + Expect(k8sClient.Create(context.Background(), dlb)).To(Succeed()) + Expect(dlb.Spec.NLB.Protocol).To(Equal("TCP")) + }) + It("Should fail when providing an invalid protocol", func() { + dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: 
exampleDatacenterID, Protocol: "INVALID"}}) + Expect(k8sClient.Create(context.Background(), dlb)).NotTo(Succeed()) + }) It("Should succeed providing an endpoint and a port", func() { dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: exampleDatacenterID}}) dlb.Spec.LoadBalancerEndpoint = exampleEndpoint @@ -92,15 +99,6 @@ var _ = Describe("IonosCloudLoadBalancer", func() { }) }) Context("Update", func() { - When("Using a KubeVIP load balancer", func() { - It("Should succeed creating a KubeVIP load balancer with an empty endpoint and updating it", func() { - dlb := defaultLoadBalancer(LoadBalancerSource{KubeVIP: &KubeVIPSpec{}}) - Expect(k8sClient.Create(context.Background(), dlb)).To(Succeed()) - - dlb.Spec.LoadBalancerEndpoint = exampleEndpoint - Expect(k8sClient.Update(context.Background(), dlb)).To(Succeed()) - }) - }) When("Using an NLB", func() { It("Should fail when attempting to update the datacenter ID", func() { dlb := defaultLoadBalancer(LoadBalancerSource{NLB: &NLBSpec{DatacenterID: exampleDatacenterID}}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ce76a4ad..d3f2e4a3 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -395,6 +395,11 @@ func (in *IonosCloudLoadBalancerStatus) DeepCopyInto(out *IonosCloudLoadBalancer *out = new(ProvisioningRequest) **out = **in } + if in.NLBStatus != nil { + in, out := &in.NLBStatus, &out.NLBStatus + *out = new(NLBStatus) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IonosCloudLoadBalancerStatus. @@ -642,21 +647,6 @@ func (in *IonosCloudMachineTemplateSpec) DeepCopy() *IonosCloudMachineTemplateSp return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeVIPSpec) DeepCopyInto(out *KubeVIPSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVIPSpec. -func (in *KubeVIPSpec) DeepCopy() *KubeVIPSpec { - if in == nil { - return nil - } - out := new(KubeVIPSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerSource) DeepCopyInto(out *LoadBalancerSource) { *out = *in @@ -665,11 +655,6 @@ func (in *LoadBalancerSource) DeepCopyInto(out *LoadBalancerSource) { *out = new(NLBSpec) **out = **in } - if in.KubeVIP != nil { - in, out := &in.KubeVIP, &out.KubeVIP - *out = new(KubeVIPSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSource. @@ -744,6 +729,21 @@ func (in *NLBSpec) DeepCopy() *NLBSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NLBStatus) DeepCopyInto(out *NLBStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NLBStatus. +func (in *NLBStatus) DeepCopy() *NLBStatus { + if in == nil { + return nil + } + out := new(NLBStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Network) DeepCopyInto(out *Network) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index c2a63be8..c8533064 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -19,6 +19,7 @@ limitations under the License. package main import ( + "context" "flag" "os" @@ -100,47 +101,54 @@ func main() { ctx := ctrl.SetupSignalHandler() + //+kubebuilder:scaffold:builder + if err := setupControllers(ctx, mgr); err != nil { + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + setupLog.Info("Starting manager") + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) + } +} + +func setupControllers(ctx context.Context, mgr ctrl.Manager) error { const errMsg = "unable to create controller" - if err = iccontroller.NewIonosCloudClusterReconciler(mgr).SetupWithManager( + if err := iccontroller.NewIonosCloudClusterReconciler(mgr).SetupWithManager( ctx, mgr, controller.Options{MaxConcurrentReconciles: icClusterConcurrency}, ); err != nil { setupLog.Error(err, errMsg, "controller", "IonosCloudCluster") - os.Exit(1) + return err } - if err = iccontroller.NewIonosCloudMachineReconciler(mgr).SetupWithManager( + if err := iccontroller.NewIonosCloudMachineReconciler(mgr).SetupWithManager( mgr, controller.Options{MaxConcurrentReconciles: icMachineConcurrency}, ); err != nil { setupLog.Error(err, errMsg, "controller", "IonosCloudMachine") - os.Exit(1) + return err } - if err = iccontroller.NewIonosCloudLoadBalancerReconciler(mgr).SetupWithManager( + if err := iccontroller.NewIonosCloudLoadBalancerReconciler(mgr).SetupWithManager( ctx, mgr, controller.Options{MaxConcurrentReconciles: icLoadBalancerConcurrency}, ); err != nil { setupLog.Error(err, errMsg, "controller", "IonosCloudLoadBalancer") - os.Exit(1) + return err } - //+kubebuilder:scaffold:builder - - if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up health check") - os.Exit(1) - } - if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - setupLog.Error(err, "unable to set up ready check") - os.Exit(1) - } - - setupLog.Info("Starting manager") - if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") - os.Exit(1) - } + return nil } // initFlags parses the command line flags. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudloadbalancers.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudloadbalancers.yaml index 81c9bbfd..0c175c30 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudloadbalancers.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ionoscloudloadbalancers.yaml @@ -45,16 +45,6 @@ spec: spec: description: IonosCloudLoadBalancerSpec defines the desired state of IonosCloudLoadBalancer. properties: - kubeVIP: - description: KubeVIP is used for setting up a highly available control - plane. - properties: - image: - description: |- - Image is the container image to use for the KubeVIP static pod. - If not provided, the default image will be used. - type: string - type: object loadBalancerEndpoint: description: |- LoadBalancerEndpoint represents the endpoint of the load balanced control plane. @@ -82,6 +72,15 @@ spec: nlb: description: NLB is used for setting up a network load balancer. 
                properties:
+                  algorithm:
+                    default: ROUND_ROBIN
+                    description: Algorithm is the load balancing algorithm.
+                    enum:
+                    - ROUND_ROBIN
+                    - LEAST_CONNECTION
+                    - RANDOM
+                    - SOURCE_IP
+                    type: string
                  datacenterID:
                    description: DatacenterID is the ID of the datacenter where
                      the load balancer should be created.
@@ -90,6 +89,13 @@ spec:
                    x-kubernetes-validations:
                    - message: datacenterID is immutable
                      rule: self == oldSelf
+                  protocol:
+                    default: TCP
+                    description: Protocol is the load balancing protocol.
+                    enum:
+                    - TCP
+                    - HTTP
+                    type: string
                required:
                - datacenterID
                type: object
@@ -167,6 +173,23 @@ spec:
                - method
                - requestPath
                type: object
+              nlbStatus:
+                description: NLBStatus defines the status for a network load balancer.
+                properties:
+                  id:
+                    description: ID is the ID of the network load balancer.
+                    type: string
+                  privateLANID:
+                    description: PrivateLANID is the ID of the LAN used for outgoing
+                      traffic.
+                    format: int32
+                    type: integer
+                  publicLANID:
+                    description: PublicLANID is the ID of the LAN used for incoming
+                      traffic.
+                    format: int32
+                    type: integer
+                type: object
              ready:
                description: Ready indicates that the load balancer is ready.
                type: boolean
diff --git a/go.mod b/go.mod
index 6677cc6a..5357cc3d 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ require (
 	github.com/onsi/gomega v1.34.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.9.0
+	golang.org/x/sync v0.7.0
 	k8s.io/api v0.30.3
 	k8s.io/apimachinery v0.30.3
 	k8s.io/client-go v0.30.3
@@ -121,7 +122,6 @@ require (
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
 	golang.org/x/net v0.27.0 // indirect
 	golang.org/x/oauth2 v0.21.0 // indirect
-	golang.org/x/sync v0.7.0 // indirect
 	golang.org/x/sys v0.22.0 // indirect
 	golang.org/x/term v0.22.0 // indirect
 	golang.org/x/text v0.16.0 // indirect
diff --git a/internal/controller/ionoscloudcluster_controller.go b/internal/controller/ionoscloudcluster_controller.go
index bde80b10..8493d0e8 100644
--- a/internal/controller/ionoscloudcluster_controller.go
+++ b/internal/controller/ionoscloudcluster_controller.go
@@ -224,10 +224,10 @@ func (r *IonosCloudClusterReconciler) reconcileDelete(
 	}
 
 	var reconcileSequence []serviceReconcileStep[scope.Cluster]
-	// TODO: This logic needs to move to another controller.
 	if clusterScope.IonosCluster.Spec.LoadBalancerProviderRef != nil {
 		reconcileSequence = []serviceReconcileStep[scope.Cluster]{
 			{"ReconcileControlPlaneEndpointDeletion", cloudService.ReconcileControlPlaneEndpointDeletion},
+			{"ReconcileLoadBalancerDeletion", r.reconcileLoadBalancerDeletion},
 		}
 	}
 
@@ -240,13 +240,43 @@ func (r *IonosCloudClusterReconciler) reconcileDelete(
 			return ctrl.Result{RequeueAfter: defaultReconcileDuration}, err
 		}
 	}
+
 	if err := removeCredentialsFinalizer(ctx, r.Client, clusterScope.IonosCluster); err != nil {
 		return ctrl.Result{}, err
 	}
+
 	controllerutil.RemoveFinalizer(clusterScope.IonosCluster, infrav1.ClusterFinalizer)
 	return ctrl.Result{}, nil
 }
 
+func (r *IonosCloudClusterReconciler) reconcileLoadBalancerDeletion(ctx context.Context, clusterScope *scope.Cluster) (requeue bool, err error) {
+	logger := ctrl.LoggerFrom(ctx)
+	// We need to wait until the load balancer has been deleted before we can remove the finalizers.
+	if clusterScope.IonosCluster.Spec.LoadBalancerProviderRef != nil {
+		loadBalancer := infrav1.IonosCloudLoadBalancer{}
+		lbKey := client.ObjectKey{
+			Namespace: clusterScope.IonosCluster.GetNamespace(),
+			Name:      clusterScope.IonosCluster.Spec.LoadBalancerProviderRef.Name,
+		}
+
+		err := r.Client.Get(ctx, lbKey, &loadBalancer)
+		if client.IgnoreNotFound(err) != nil {
+			return true, err
+		}
+
+		if err == nil {
+			if loadBalancer.DeletionTimestamp.IsZero() {
+				logger.Info("Deleting load balancer", "loadBalancer", loadBalancer.Name)
+				return true, r.Client.Delete(ctx, &loadBalancer)
+			}
+
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
 func (*IonosCloudClusterReconciler) checkRequestStatus(
 	ctx context.Context, clusterScope *scope.Cluster, cloudService *cloud.Service,
 ) (requeue bool, retErr error) {
diff --git a/internal/controller/ionoscloudloadbalancer_controller.go b/internal/controller/ionoscloudloadbalancer_controller.go
index 759cddd6..86f5226f 100644
--- a/internal/controller/ionoscloudloadbalancer_controller.go
+++ b/internal/controller/ionoscloudloadbalancer_controller.go
@@ -32,11 +32,13 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1"
 	"github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/loadbalancing"
+	"github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud"
 	"github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/locker"
 	"github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope"
 )
@@ -104,8 +106,6 @@ func (r *IonosCloudLoadBalancerReconciler) Reconcile(
 		return ctrl.Result{}, nil
 	}
 
-	// TODO(lubedacht) this check needs to move into a validating webhook and should prevent that the resource can be applied in the first place.
if err = r.validateLoadBalancerSource(ionosCloudLoadBalancer.Spec.LoadBalancerSource); err != nil { return ctrl.Result{}, reconcile.TerminalError(err) } @@ -146,10 +146,10 @@ func (r *IonosCloudLoadBalancerReconciler) Reconcile( } if !ionosCloudLoadBalancer.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, loadBalancerScope, prov) + return r.reconcileDelete(ctx, loadBalancerScope, prov, cloudService) } - return r.reconcileNormal(ctx, loadBalancerScope, prov) + return r.reconcileNormal(ctx, loadBalancerScope, prov, cloudService) } func (r *IonosCloudLoadBalancerReconciler) getIonosCluster( @@ -177,6 +177,7 @@ func (r *IonosCloudLoadBalancerReconciler) reconcileNormal( ctx context.Context, loadBalancerScope *scope.LoadBalancer, prov loadbalancing.Provisioner, + cloudService *cloud.Service, ) (ctrl.Result, error) { logger := log.FromContext(ctx) logger.V(4).Info("Reconciling IonosCloudLoadBalancer") @@ -192,6 +193,10 @@ func (r *IonosCloudLoadBalancerReconciler) reconcileNormal( return ctrl.Result{}, reconcile.TerminalError(err) } + if requeue, err := r.checkRequestStatus(ctx, loadBalancerScope, cloudService); err != nil || requeue { + return ctrl.Result{Requeue: requeue, RequeueAfter: defaultReconcileDuration}, err + } + if requeue, err := prov.Provision(ctx, loadBalancerScope); err != nil || requeue { if err != nil { err = fmt.Errorf("error during provisioning: %w", err) @@ -200,6 +205,11 @@ func (r *IonosCloudLoadBalancerReconciler) reconcileNormal( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, err } + loadBalancerScope.ClusterScope.IonosCluster.Spec.ControlPlaneEndpoint = loadBalancerScope.Endpoint() + if err := loadBalancerScope.ClusterScope.PatchObject(); err != nil { + return ctrl.Result{}, err + } + conditions.MarkTrue(loadBalancerScope.LoadBalancer, infrav1.LoadBalancerReadyCondition) loadBalancerScope.LoadBalancer.Status.Ready = true @@ -207,14 +217,19 @@ func (r *IonosCloudLoadBalancerReconciler) reconcileNormal( return ctrl.Result{}, nil } -func (*IonosCloudLoadBalancerReconciler) reconcileDelete( +func (r *IonosCloudLoadBalancerReconciler) reconcileDelete( ctx context.Context, loadBalancerScope *scope.LoadBalancer, prov loadbalancing.Provisioner, + cloudService *cloud.Service, ) (ctrl.Result, error) { logger := log.FromContext(ctx) logger.V(4).Info("Deleting IonosCloudLoadBalancer") + if requeue, err := r.checkRequestStatus(ctx, loadBalancerScope, cloudService); err != nil || requeue { + return ctrl.Result{Requeue: requeue}, err + } + if requeue, err := prov.Destroy(ctx, loadBalancerScope); err != nil || requeue { if err != nil { err = fmt.Errorf("error during cleanup: %w", err) @@ -236,6 +251,7 @@ func (r *IonosCloudLoadBalancerReconciler) SetupWithManager(ctx context.Context, return ctrl.NewControllerManagedBy(mgr). WithOptions(options). For(&infrav1.IonosCloudLoadBalancer{}). + Watches(&infrav1.IonosCloudMachine{}, handler.EnqueueRequestsFromMapFunc(r.machineToLoadBalancerRequests)). WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))). 
Complete(reconcile.AsReconciler[*infrav1.IonosCloudLoadBalancer](r.Client, r)) } @@ -258,14 +274,104 @@ func (*IonosCloudLoadBalancerReconciler) validateEndpoints(loadBalancerScope *sc return nil } -func (*IonosCloudLoadBalancerReconciler) validateLoadBalancerSource(source infrav1.LoadBalancerSource) error { - if source.NLB == nil && source.KubeVIP == nil { - return errors.New("exactly one source needs to be set, none are set") +func (*IonosCloudLoadBalancerReconciler) checkRequestStatus( + ctx context.Context, + loadBalancerScope *scope.LoadBalancer, + cloudService *cloud.Service, +) (requeue bool, retErr error) { + logger := ctrl.LoggerFrom(ctx) + loadBalancer := loadBalancerScope.LoadBalancer + + if req := loadBalancer.Status.CurrentRequest; req != nil { + logger.Info("Checking request status", "request", req.RequestPath, "method", req.Method) + status, message, err := cloudService.GetRequestStatus(ctx, req.RequestPath) + if err != nil { + retErr = fmt.Errorf("could not get request status: %w", err) + } else { + requeue, retErr = withStatus(status, message, &logger, func() error { + loadBalancer.DeleteCurrentRequest() + return nil + }) + } } - if source.NLB != nil && source.KubeVIP != nil { - return errors.New("exactly one source needs to be set, both are set") + return requeue, retErr +} + +func (*IonosCloudLoadBalancerReconciler) validateLoadBalancerSource(source infrav1.LoadBalancerSource) error { + if source.NLB == nil { + return errors.New("exactly one source needs to be set, NLB is not set") } return nil } + +func (r *IonosCloudLoadBalancerReconciler) machineToLoadBalancerRequests(ctx context.Context, obj client.Object) []reconcile.Request { + logger := ctrl.LoggerFrom(ctx) + machine, ok := obj.(*infrav1.IonosCloudMachine) + if !ok { + return nil + } + + if !isControlPlaneMachine(machine) || !isMachineAvailable(machine) { + return nil + } + + infraCluster, err := getInfraClusterFromMachine(ctx, r.Client, machine) + if err != nil { + logger.Error(err, "failed to get infra cluster from machine") + return nil + } + + if infraCluster.Spec.LoadBalancerProviderRef == nil { + logger.Info("Load balancer provider ref is not set, skipping load balancer reconciliation") + return nil + } + + return []reconcile.Request{{ + NamespacedName: client.ObjectKey{ + Namespace: infraCluster.GetNamespace(), + Name: infraCluster.Spec.LoadBalancerProviderRef.Name, + }, + }} +} + +func isControlPlaneMachine(machine *infrav1.IonosCloudMachine) bool { + labels := machine.GetLabels() + if labels == nil { + return false + } + + _, ok := labels[clusterv1.MachineControlPlaneLabel] + return ok +} + +func isMachineAvailable(machine *infrav1.IonosCloudMachine) bool { + return machine.DeletionTimestamp.IsZero() && conditions.IsTrue(machine, clusterv1.ReadyCondition) +} + +func getInfraClusterFromMachine(ctx context.Context, c client.Client, machine *infrav1.IonosCloudMachine) (*infrav1.IonosCloudCluster, error) { + labels := machine.GetLabels() + if labels == nil { + return nil, errors.New("machine has no labels") + } + + capiCluster, err := util.GetClusterFromMetadata(ctx, c, machine.ObjectMeta) + if err != nil { + return nil, err + } + + if !capiCluster.DeletionTimestamp.IsZero() { + return nil, errors.New("cluster is already being deleted") + } + + infraRef := capiCluster.Spec.InfrastructureRef + + infraClusterKey := client.ObjectKey{Namespace: infraRef.Namespace, Name: infraRef.Name} + var infraCluster infrav1.IonosCloudCluster + if err := c.Get(ctx, infraClusterKey, &infraCluster); err != nil { + return nil, 
err + } + + return &infraCluster, nil +} diff --git a/internal/controller/ionoscloudmachine_controller.go b/internal/controller/ionoscloudmachine_controller.go index 6f7e9839..3ee0022a 100644 --- a/internal/controller/ionoscloudmachine_controller.go +++ b/internal/controller/ionoscloudmachine_controller.go @@ -178,7 +178,7 @@ func (r *IonosCloudMachineReconciler) reconcileNormal( return ctrl.Result{RequeueAfter: defaultReconcileDuration}, nil } - k8sHelper := k8s.NewHelper(r.Client, log) + k8sHelper := k8s.NewHelper(r.Client, logger) reconcileSequence := []serviceReconcileStep[scope.Machine]{ {"ReconcileLAN", cloudService.ReconcileLAN}, {"ReconcileIPAddressClaims", k8sHelper.ReconcileIPAddresses}, @@ -220,7 +220,7 @@ func (r *IonosCloudMachineReconciler) reconcileDelete( return ctrl.Result{RequeueAfter: reducedReconcileDuration}, nil } - ipamHelper := k8s.NewHelper(r.Client, log) + ipamHelper := k8s.NewHelper(r.Client, logger) reconcileSequence := []serviceReconcileStep[scope.Machine]{ // NOTE(avorima): NICs, which are configured in an IP failover configuration, cannot be deleted // by a request to delete the server. Therefore, during deletion, we need to remove the NIC from @@ -342,7 +342,7 @@ func (r *IonosCloudMachineReconciler) SetupWithManager(mgr ctrl.Manager, options &clusterv1.Machine{}, handler.EnqueueRequestsFromMapFunc( util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind(infrav1.IonosCloudMachineType)))). - Complete(reconcile.AsReconciler(r.Client, r)) + Complete(reconcile.AsReconciler[*infrav1.IonosCloudMachine](r.Client, r)) } func (r *IonosCloudMachineReconciler) getClusterScope( diff --git a/internal/ionoscloud/client.go b/internal/ionoscloud/client.go index 0212815c..1007085d 100644 --- a/internal/ionoscloud/client.go +++ b/internal/ionoscloud/client.go @@ -39,6 +39,8 @@ type Client interface { StartServer(ctx context.Context, datacenterID, serverID string) (string, error) // DeleteVolume deletes the volume that matches the provided volumeID in the specified data center. DeleteVolume(ctx context.Context, datacenterID, volumeID string) (string, error) + // GetLAN returns the LAN that matches the provided lanID in the specified data center. + GetLAN(ctx context.Context, datacenterID, lanID string) (*sdk.Lan, error) // CreateLAN creates a new LAN with the provided properties in the specified data center, // returning the request path. CreateLAN(ctx context.Context, datacenterID string, properties sdk.LanProperties) (string, error) @@ -64,6 +66,8 @@ type Client interface { WaitForRequest(ctx context.Context, requestURL string) error // GetRequests returns the requests made in the last 24 hours that match the provided method and path. GetRequests(ctx context.Context, method, path string) ([]sdk.Request, error) + // CreateNIC creates a new NIC with the provided properties in the specified data center and server. + CreateNIC(ctx context.Context, datacenterID, serverID string, properties sdk.NicProperties) (string, error) // PatchNIC updates the NIC identified by nicID with the provided properties, returning the request location. PatchNIC(ctx context.Context, datacenterID, serverID, nicID string, properties sdk.NicProperties) (string, error) // GetDatacenterLocationByID returns the location of the data center identified by datacenterID. @@ -72,4 +76,16 @@ type Client interface { GetImage(ctx context.Context, imageID string) (*sdk.Image, error) // ListLabels returns a list of all available resource labels. 
ListLabels(ctx context.Context) ([]sdk.Label, error) + // CreateNLB creates a new Network Load Balancer with the provided properties in the specified data center. + CreateNLB(ctx context.Context, datacenterID string, properties sdk.NetworkLoadBalancerProperties) (string, error) + // DeleteNLB deletes the Network Load Balancer identified by the ID in the specified data center. + DeleteNLB(ctx context.Context, datacenterID, nlbID string) (string, error) + // GetNLB returns the Network Load Balancer identified by the ID in the specified data center. + GetNLB(ctx context.Context, datacenterID, nlbID string) (*sdk.NetworkLoadBalancer, error) + // ListNLBs returns a list of Network Load Balancers in the specified data center. + ListNLBs(ctx context.Context, datacenterID string) (*sdk.NetworkLoadBalancers, error) + // CreateNLBForwardingRule creates a new forwarding rule with the provided properties in the specified data center and NLB. + CreateNLBForwardingRule(ctx context.Context, datacenterID, nlbID string, rule sdk.NetworkLoadBalancerForwardingRule) (string, error) + // UpdateNLBForwardingRule updates the forwarding rule identified by ruleID with the provided properties in the specified data center and NLB. + UpdateNLBForwardingRule(ctx context.Context, datacenterID, nlbID, ruleID string, rule sdk.NetworkLoadBalancerForwardingRule) (string, error) } diff --git a/internal/ionoscloud/client/client.go b/internal/ionoscloud/client/client.go index 76e021f3..c4196c79 100644 --- a/internal/ionoscloud/client/client.go +++ b/internal/ionoscloud/client/client.go @@ -195,6 +195,29 @@ func (c *IonosCloudClient) StartServer(ctx context.Context, datacenterID, server return "", errLocationHeaderEmpty } +// CreateNIC creates a new NIC with the provided properties in the specified data center and server. +func (c *IonosCloudClient) CreateNIC( + ctx context.Context, datacenterID, serverID string, properties sdk.NicProperties, +) (string, error) { + if datacenterID == "" { + return "", errDatacenterIDIsEmpty + } + if serverID == "" { + return "", errServerIDIsEmpty + } + nic := sdk.Nic{ + Properties: &properties, + } + _, req, err := c.API.NetworkInterfacesApi.DatacentersServersNicsPost(ctx, datacenterID, serverID).Nic(nic).Execute() + if err != nil { + return "", fmt.Errorf(apiCallErrWrapper, err) + } + if location := req.Header.Get(locationHeaderKey); location != "" { + return location, nil + } + return "", errLocationHeaderEmpty +} + // DeleteVolume deletes the volume that matches the provided volumeID in the specified data center. func (c *IonosCloudClient) DeleteVolume(ctx context.Context, datacenterID, volumeID string) (string, error) { if datacenterID == "" { @@ -217,6 +240,21 @@ func (c *IonosCloudClient) DeleteVolume(ctx context.Context, datacenterID, volum return "", errLocationHeaderEmpty } +// GetLAN returns the LAN that matches the provided lanID in the specified data center. +func (c *IonosCloudClient) GetLAN(ctx context.Context, datacenterID, lanID string) (*sdk.Lan, error) { + if datacenterID == "" { + return nil, errDatacenterIDIsEmpty + } + if lanID == "" { + return nil, errLANIDIsEmpty + } + lan, _, err := c.API.LANsApi.DatacentersLansFindById(ctx, datacenterID, lanID).Depth(c.requestDepth).Execute() + if err != nil { + return nil, fmt.Errorf(apiCallErrWrapper, err) + } + return &lan, nil +} + // CreateLAN creates a new LAN with the provided properties in the specified data center, // returning the request location. 
func (c *IonosCloudClient) CreateLAN(ctx context.Context, datacenterID string, properties sdk.LanProperties, @@ -500,3 +538,152 @@ func (c *IonosCloudClient) ListLabels(ctx context.Context) ([]sdk.Label, error) return *labels.Items, nil } + +// CreateNLB creates a new Network Load Balancer with the provided properties in the specified data center. +func (c *IonosCloudClient) CreateNLB(ctx context.Context, datacenterID string, properties sdk.NetworkLoadBalancerProperties) (string, error) { + if datacenterID == "" { + return "", errDatacenterIDIsEmpty + } + + newNLB := sdk.NetworkLoadBalancer{ + Properties: &properties, + } + + _, res, err := c.API.NetworkLoadBalancersApi. + DatacentersNetworkloadbalancersPost(ctx, datacenterID). + NetworkLoadBalancer(newNLB). + Execute() + if err != nil { + return "", fmt.Errorf(apiCallErrWrapper, err) + } + + if location := res.Header.Get(locationHeaderKey); location != "" { + return location, nil + } + + return "", errLocationHeaderEmpty +} + +// DeleteNLB deletes the Network Load Balancer identified by the ID in the specified data center. +func (c *IonosCloudClient) DeleteNLB(ctx context.Context, datacenterID, nlbID string) (string, error) { + if datacenterID == "" { + return "", errDatacenterIDIsEmpty + } + + if nlbID == "" { + return "", errNLBIDIsEmpty + } + + req, err := c.API.NetworkLoadBalancersApi. + DatacentersNetworkloadbalancersDelete(ctx, datacenterID, nlbID). + Execute() + if err != nil { + return "", fmt.Errorf(apiCallErrWrapper, err) + } + + if location := req.Header.Get(locationHeaderKey); location != "" { + return location, nil + } + + return "", errLocationHeaderEmpty +} + +// GetNLB returns the Network Load Balancer identified by the ID in the specified data center. +func (c *IonosCloudClient) GetNLB(ctx context.Context, datacenterID, nlbID string) (*sdk.NetworkLoadBalancer, error) { + if datacenterID == "" { + return nil, errDatacenterIDIsEmpty + } + + if nlbID == "" { + return nil, errNLBIDIsEmpty + } + + nlb, _, err := c.API.NetworkLoadBalancersApi. + DatacentersNetworkloadbalancersFindByNetworkLoadBalancerId(ctx, datacenterID, nlbID). + Depth(c.requestDepth). + Execute() + if err != nil { + return nil, fmt.Errorf(apiCallErrWrapper, err) + } + + return &nlb, nil +} + +// ListNLBs returns a list of Network Load Balancers in the specified data center. +func (c *IonosCloudClient) ListNLBs(ctx context.Context, datacenterID string) (*sdk.NetworkLoadBalancers, error) { + if datacenterID == "" { + return nil, errDatacenterIDIsEmpty + } + + nlbs, _, err := c.API.NetworkLoadBalancersApi. + DatacentersNetworkloadbalancersGet(ctx, datacenterID). + Depth(c.requestDepth). + Execute() + if err != nil { + return nil, fmt.Errorf(apiCallErrWrapper, err) + } + + return &nlbs, nil +} + +// CreateNLBForwardingRule creates a new forwarding rule with the provided properties in the specified data center and NLB. +func (c *IonosCloudClient) CreateNLBForwardingRule(ctx context.Context, datacenterID, nlbID string, rule sdk.NetworkLoadBalancerForwardingRule) (string, error) { + if datacenterID == "" { + return "", errDatacenterIDIsEmpty + } + + if nlbID == "" { + return "", errNLBIDIsEmpty + } + + _, req, err := c.API.NetworkLoadBalancersApi. + DatacentersNetworkloadbalancersForwardingrulesPost(ctx, datacenterID, nlbID). + NetworkLoadBalancerForwardingRule(rule). 
+ Execute() + if err != nil { + return "", fmt.Errorf(apiCallErrWrapper, err) + } + + if location := req.Header.Get(locationHeaderKey); location != "" { + return location, nil + } + + return "", errLocationHeaderEmpty +} + +// UpdateNLBForwardingRule updates the forwarding rule identified by ruleID with the provided properties in the specified data center and NLB. +func (c *IonosCloudClient) UpdateNLBForwardingRule( + ctx context.Context, + datacenterID, nlbID, ruleID string, + rule sdk.NetworkLoadBalancerForwardingRule, +) (string, error) { + if datacenterID == "" { + return "", errDatacenterIDIsEmpty + } + + if nlbID == "" { + return "", errNLBIDIsEmpty + } + + if ruleID == "" { + return "", errNLBRuleIDIsEmpty + } + + putRule := sdk.NetworkLoadBalancerForwardingRulePut{ + Properties: rule.Properties, + } + + _, req, err := c.API.NetworkLoadBalancersApi. + DatacentersNetworkloadbalancersForwardingrulesPut(ctx, datacenterID, nlbID, ruleID). + NetworkLoadBalancerForwardingRule(putRule). + Execute() + if err != nil { + return "", fmt.Errorf(apiCallErrWrapper, err) + } + + if location := req.Header.Get(locationHeaderKey); location != "" { + return location, nil + } + + return "", errLocationHeaderEmpty +} diff --git a/internal/ionoscloud/client/errors.go b/internal/ionoscloud/client/errors.go index e882d662..de928052 100644 --- a/internal/ionoscloud/client/errors.go +++ b/internal/ionoscloud/client/errors.go @@ -25,6 +25,8 @@ var ( errLANIDIsEmpty = errors.New("error parsing LAN ID: value cannot be empty") errNICIDIsEmpty = errors.New("error parsing NIC ID: value cannot be empty") errIPBlockIDIsEmpty = errors.New("error parsing IP block ID: value cannot be empty") + errNLBIDIsEmpty = errors.New("error parsing NLB ID: value cannot be empty") + errNLBRuleIDIsEmpty = errors.New("error parsing NLB rule ID: value cannot be empty") errRequestURLIsEmpty = errors.New("a request URL is necessary for the operation") errLocationHeaderEmpty = errors.New(apiNoLocationErrMessage) ) diff --git a/internal/ionoscloud/clienttest/mock_client.go b/internal/ionoscloud/clienttest/mock_client.go index 000077ce..09ea2a39 100644 --- a/internal/ionoscloud/clienttest/mock_client.go +++ b/internal/ionoscloud/clienttest/mock_client.go @@ -155,6 +155,182 @@ func (_c *MockClient_CreateLAN_Call) RunAndReturn(run func(context.Context, stri return _c } +// CreateNIC provides a mock function with given fields: ctx, datacenterID, serverID, properties +func (_m *MockClient) CreateNIC(ctx context.Context, datacenterID string, serverID string, properties ionoscloud.NicProperties) (string, error) { + ret := _m.Called(ctx, datacenterID, serverID, properties) + + if len(ret) == 0 { + panic("no return value specified for CreateNIC") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, ionoscloud.NicProperties) (string, error)); ok { + return rf(ctx, datacenterID, serverID, properties) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, ionoscloud.NicProperties) string); ok { + r0 = rf(ctx, datacenterID, serverID, properties) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, ionoscloud.NicProperties) error); ok { + r1 = rf(ctx, datacenterID, serverID, properties) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_CreateNIC_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateNIC' +type MockClient_CreateNIC_Call struct { + *mock.Call +} + +// 
CreateNIC is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - serverID string +// - properties ionoscloud.NicProperties +func (_e *MockClient_Expecter) CreateNIC(ctx interface{}, datacenterID interface{}, serverID interface{}, properties interface{}) *MockClient_CreateNIC_Call { + return &MockClient_CreateNIC_Call{Call: _e.mock.On("CreateNIC", ctx, datacenterID, serverID, properties)} +} + +func (_c *MockClient_CreateNIC_Call) Run(run func(ctx context.Context, datacenterID string, serverID string, properties ionoscloud.NicProperties)) *MockClient_CreateNIC_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(ionoscloud.NicProperties)) + }) + return _c +} + +func (_c *MockClient_CreateNIC_Call) Return(_a0 string, _a1 error) *MockClient_CreateNIC_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_CreateNIC_Call) RunAndReturn(run func(context.Context, string, string, ionoscloud.NicProperties) (string, error)) *MockClient_CreateNIC_Call { + _c.Call.Return(run) + return _c +} + +// CreateNLB provides a mock function with given fields: ctx, datacenterID, properties +func (_m *MockClient) CreateNLB(ctx context.Context, datacenterID string, properties ionoscloud.NetworkLoadBalancerProperties) (string, error) { + ret := _m.Called(ctx, datacenterID, properties) + + if len(ret) == 0 { + panic("no return value specified for CreateNLB") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, ionoscloud.NetworkLoadBalancerProperties) (string, error)); ok { + return rf(ctx, datacenterID, properties) + } + if rf, ok := ret.Get(0).(func(context.Context, string, ionoscloud.NetworkLoadBalancerProperties) string); ok { + r0 = rf(ctx, datacenterID, properties) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, ionoscloud.NetworkLoadBalancerProperties) error); ok { + r1 = rf(ctx, datacenterID, properties) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_CreateNLB_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateNLB' +type MockClient_CreateNLB_Call struct { + *mock.Call +} + +// CreateNLB is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - properties ionoscloud.NetworkLoadBalancerProperties +func (_e *MockClient_Expecter) CreateNLB(ctx interface{}, datacenterID interface{}, properties interface{}) *MockClient_CreateNLB_Call { + return &MockClient_CreateNLB_Call{Call: _e.mock.On("CreateNLB", ctx, datacenterID, properties)} +} + +func (_c *MockClient_CreateNLB_Call) Run(run func(ctx context.Context, datacenterID string, properties ionoscloud.NetworkLoadBalancerProperties)) *MockClient_CreateNLB_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(ionoscloud.NetworkLoadBalancerProperties)) + }) + return _c +} + +func (_c *MockClient_CreateNLB_Call) Return(_a0 string, _a1 error) *MockClient_CreateNLB_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_CreateNLB_Call) RunAndReturn(run func(context.Context, string, ionoscloud.NetworkLoadBalancerProperties) (string, error)) *MockClient_CreateNLB_Call { + _c.Call.Return(run) + return _c +} + +// CreateNLBForwardingRule provides a mock function with given fields: ctx, datacenterID, nlbID, rule +func (_m *MockClient) CreateNLBForwardingRule(ctx 
context.Context, datacenterID string, nlbID string, rule ionoscloud.NetworkLoadBalancerForwardingRule) (string, error) { + ret := _m.Called(ctx, datacenterID, nlbID, rule) + + if len(ret) == 0 { + panic("no return value specified for CreateNLBForwardingRule") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) (string, error)); ok { + return rf(ctx, datacenterID, nlbID, rule) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) string); ok { + r0 = rf(ctx, datacenterID, nlbID, rule) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) error); ok { + r1 = rf(ctx, datacenterID, nlbID, rule) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_CreateNLBForwardingRule_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateNLBForwardingRule' +type MockClient_CreateNLBForwardingRule_Call struct { + *mock.Call +} + +// CreateNLBForwardingRule is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - nlbID string +// - rule ionoscloud.NetworkLoadBalancerForwardingRule +func (_e *MockClient_Expecter) CreateNLBForwardingRule(ctx interface{}, datacenterID interface{}, nlbID interface{}, rule interface{}) *MockClient_CreateNLBForwardingRule_Call { + return &MockClient_CreateNLBForwardingRule_Call{Call: _e.mock.On("CreateNLBForwardingRule", ctx, datacenterID, nlbID, rule)} +} + +func (_c *MockClient_CreateNLBForwardingRule_Call) Run(run func(ctx context.Context, datacenterID string, nlbID string, rule ionoscloud.NetworkLoadBalancerForwardingRule)) *MockClient_CreateNLBForwardingRule_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(ionoscloud.NetworkLoadBalancerForwardingRule)) + }) + return _c +} + +func (_c *MockClient_CreateNLBForwardingRule_Call) Return(_a0 string, _a1 error) *MockClient_CreateNLBForwardingRule_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_CreateNLBForwardingRule_Call) RunAndReturn(run func(context.Context, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) (string, error)) *MockClient_CreateNLBForwardingRule_Call { + _c.Call.Return(run) + return _c +} + // CreateServer provides a mock function with given fields: ctx, datacenterID, properties, entities func (_m *MockClient) CreateServer(ctx context.Context, datacenterID string, properties ionoscloud.ServerProperties, entities ionoscloud.ServerEntities) (*ionoscloud.Server, string, error) { ret := _m.Called(ctx, datacenterID, properties, entities) @@ -338,6 +514,64 @@ func (_c *MockClient_DeleteLAN_Call) RunAndReturn(run func(context.Context, stri return _c } +// DeleteNLB provides a mock function with given fields: ctx, datacenterID, nlbID +func (_m *MockClient) DeleteNLB(ctx context.Context, datacenterID string, nlbID string) (string, error) { + ret := _m.Called(ctx, datacenterID, nlbID) + + if len(ret) == 0 { + panic("no return value specified for DeleteNLB") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (string, error)); ok { + return rf(ctx, datacenterID, nlbID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) string); ok { + r0 = rf(ctx, datacenterID, nlbID) + } else { + r0 = 
ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, datacenterID, nlbID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_DeleteNLB_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteNLB' +type MockClient_DeleteNLB_Call struct { + *mock.Call +} + +// DeleteNLB is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - nlbID string +func (_e *MockClient_Expecter) DeleteNLB(ctx interface{}, datacenterID interface{}, nlbID interface{}) *MockClient_DeleteNLB_Call { + return &MockClient_DeleteNLB_Call{Call: _e.mock.On("DeleteNLB", ctx, datacenterID, nlbID)} +} + +func (_c *MockClient_DeleteNLB_Call) Run(run func(ctx context.Context, datacenterID string, nlbID string)) *MockClient_DeleteNLB_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockClient_DeleteNLB_Call) Return(_a0 string, _a1 error) *MockClient_DeleteNLB_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_DeleteNLB_Call) RunAndReturn(run func(context.Context, string, string) (string, error)) *MockClient_DeleteNLB_Call { + _c.Call.Return(run) + return _c +} + // DeleteServer provides a mock function with given fields: ctx, datacenterID, serverID, deleteVolumes func (_m *MockClient) DeleteServer(ctx context.Context, datacenterID string, serverID string, deleteVolumes bool) (string, error) { ret := _m.Called(ctx, datacenterID, serverID, deleteVolumes) @@ -630,6 +864,126 @@ func (_c *MockClient_GetImage_Call) RunAndReturn(run func(context.Context, strin return _c } +// GetLAN provides a mock function with given fields: ctx, datacenterID, lanID +func (_m *MockClient) GetLAN(ctx context.Context, datacenterID string, lanID string) (*ionoscloud.Lan, error) { + ret := _m.Called(ctx, datacenterID, lanID) + + if len(ret) == 0 { + panic("no return value specified for GetLAN") + } + + var r0 *ionoscloud.Lan + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*ionoscloud.Lan, error)); ok { + return rf(ctx, datacenterID, lanID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *ionoscloud.Lan); ok { + r0 = rf(ctx, datacenterID, lanID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ionoscloud.Lan) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, datacenterID, lanID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_GetLAN_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLAN' +type MockClient_GetLAN_Call struct { + *mock.Call +} + +// GetLAN is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - lanID string +func (_e *MockClient_Expecter) GetLAN(ctx interface{}, datacenterID interface{}, lanID interface{}) *MockClient_GetLAN_Call { + return &MockClient_GetLAN_Call{Call: _e.mock.On("GetLAN", ctx, datacenterID, lanID)} +} + +func (_c *MockClient_GetLAN_Call) Run(run func(ctx context.Context, datacenterID string, lanID string)) *MockClient_GetLAN_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockClient_GetLAN_Call) Return(_a0 *ionoscloud.Lan, _a1 error) *MockClient_GetLAN_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*MockClient_GetLAN_Call) RunAndReturn(run func(context.Context, string, string) (*ionoscloud.Lan, error)) *MockClient_GetLAN_Call { + _c.Call.Return(run) + return _c +} + +// GetNLB provides a mock function with given fields: ctx, datacenterID, nlbID +func (_m *MockClient) GetNLB(ctx context.Context, datacenterID string, nlbID string) (*ionoscloud.NetworkLoadBalancer, error) { + ret := _m.Called(ctx, datacenterID, nlbID) + + if len(ret) == 0 { + panic("no return value specified for GetNLB") + } + + var r0 *ionoscloud.NetworkLoadBalancer + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*ionoscloud.NetworkLoadBalancer, error)); ok { + return rf(ctx, datacenterID, nlbID) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *ionoscloud.NetworkLoadBalancer); ok { + r0 = rf(ctx, datacenterID, nlbID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ionoscloud.NetworkLoadBalancer) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, datacenterID, nlbID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_GetNLB_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetNLB' +type MockClient_GetNLB_Call struct { + *mock.Call +} + +// GetNLB is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - nlbID string +func (_e *MockClient_Expecter) GetNLB(ctx interface{}, datacenterID interface{}, nlbID interface{}) *MockClient_GetNLB_Call { + return &MockClient_GetNLB_Call{Call: _e.mock.On("GetNLB", ctx, datacenterID, nlbID)} +} + +func (_c *MockClient_GetNLB_Call) Run(run func(ctx context.Context, datacenterID string, nlbID string)) *MockClient_GetNLB_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockClient_GetNLB_Call) Return(_a0 *ionoscloud.NetworkLoadBalancer, _a1 error) *MockClient_GetNLB_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_GetNLB_Call) RunAndReturn(run func(context.Context, string, string) (*ionoscloud.NetworkLoadBalancer, error)) *MockClient_GetNLB_Call { + _c.Call.Return(run) + return _c +} + // GetRequests provides a mock function with given fields: ctx, method, path func (_m *MockClient) GetRequests(ctx context.Context, method string, path string) ([]ionoscloud.Request, error) { ret := _m.Called(ctx, method, path) @@ -925,6 +1279,65 @@ func (_c *MockClient_ListLabels_Call) RunAndReturn(run func(context.Context) ([] return _c } +// ListNLBs provides a mock function with given fields: ctx, datacenterID +func (_m *MockClient) ListNLBs(ctx context.Context, datacenterID string) (*ionoscloud.NetworkLoadBalancers, error) { + ret := _m.Called(ctx, datacenterID) + + if len(ret) == 0 { + panic("no return value specified for ListNLBs") + } + + var r0 *ionoscloud.NetworkLoadBalancers + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*ionoscloud.NetworkLoadBalancers, error)); ok { + return rf(ctx, datacenterID) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *ionoscloud.NetworkLoadBalancers); ok { + r0 = rf(ctx, datacenterID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ionoscloud.NetworkLoadBalancers) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, datacenterID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_ListNLBs_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'ListNLBs' +type MockClient_ListNLBs_Call struct { + *mock.Call +} + +// ListNLBs is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +func (_e *MockClient_Expecter) ListNLBs(ctx interface{}, datacenterID interface{}) *MockClient_ListNLBs_Call { + return &MockClient_ListNLBs_Call{Call: _e.mock.On("ListNLBs", ctx, datacenterID)} +} + +func (_c *MockClient_ListNLBs_Call) Run(run func(ctx context.Context, datacenterID string)) *MockClient_ListNLBs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockClient_ListNLBs_Call) Return(_a0 *ionoscloud.NetworkLoadBalancers, _a1 error) *MockClient_ListNLBs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_ListNLBs_Call) RunAndReturn(run func(context.Context, string) (*ionoscloud.NetworkLoadBalancers, error)) *MockClient_ListNLBs_Call { + _c.Call.Return(run) + return _c +} + // ListServers provides a mock function with given fields: ctx, datacenterID func (_m *MockClient) ListServers(ctx context.Context, datacenterID string) (*ionoscloud.Servers, error) { ret := _m.Called(ctx, datacenterID) @@ -1220,6 +1633,66 @@ func (_c *MockClient_StartServer_Call) RunAndReturn(run func(context.Context, st return _c } +// UpdateNLBForwardingRule provides a mock function with given fields: ctx, datacenterID, nlbID, ruleID, rule +func (_m *MockClient) UpdateNLBForwardingRule(ctx context.Context, datacenterID string, nlbID string, ruleID string, rule ionoscloud.NetworkLoadBalancerForwardingRule) (string, error) { + ret := _m.Called(ctx, datacenterID, nlbID, ruleID, rule) + + if len(ret) == 0 { + panic("no return value specified for UpdateNLBForwardingRule") + } + + var r0 string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) (string, error)); ok { + return rf(ctx, datacenterID, nlbID, ruleID, rule) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) string); ok { + r0 = rf(ctx, datacenterID, nlbID, ruleID, rule) + } else { + r0 = ret.Get(0).(string) + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) error); ok { + r1 = rf(ctx, datacenterID, nlbID, ruleID, rule) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockClient_UpdateNLBForwardingRule_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateNLBForwardingRule' +type MockClient_UpdateNLBForwardingRule_Call struct { + *mock.Call +} + +// UpdateNLBForwardingRule is a helper method to define mock.On call +// - ctx context.Context +// - datacenterID string +// - nlbID string +// - ruleID string +// - rule ionoscloud.NetworkLoadBalancerForwardingRule +func (_e *MockClient_Expecter) UpdateNLBForwardingRule(ctx interface{}, datacenterID interface{}, nlbID interface{}, ruleID interface{}, rule interface{}) *MockClient_UpdateNLBForwardingRule_Call { + return &MockClient_UpdateNLBForwardingRule_Call{Call: _e.mock.On("UpdateNLBForwardingRule", ctx, datacenterID, nlbID, ruleID, rule)} +} + +func (_c *MockClient_UpdateNLBForwardingRule_Call) Run(run func(ctx context.Context, datacenterID string, nlbID string, ruleID string, rule ionoscloud.NetworkLoadBalancerForwardingRule)) *MockClient_UpdateNLBForwardingRule_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(ionoscloud.NetworkLoadBalancerForwardingRule)) + }) + return _c +} + +func (_c *MockClient_UpdateNLBForwardingRule_Call) Return(_a0 string, _a1 error) *MockClient_UpdateNLBForwardingRule_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockClient_UpdateNLBForwardingRule_Call) RunAndReturn(run func(context.Context, string, string, string, ionoscloud.NetworkLoadBalancerForwardingRule) (string, error)) *MockClient_UpdateNLBForwardingRule_Call { + _c.Call.Return(run) + return _c +} + // WaitForRequest provides a mock function with given fields: ctx, requestURL func (_m *MockClient) WaitForRequest(ctx context.Context, requestURL string) error { ret := _m.Called(ctx, requestURL) diff --git a/internal/loadbalancing/provisioner.go b/internal/loadbalancing/provisioner.go index 126031f1..f7091592 100644 --- a/internal/loadbalancing/provisioner.go +++ b/internal/loadbalancing/provisioner.go @@ -37,12 +37,10 @@ type Provisioner interface { } // NewProvisioner creates a new load balancer provisioner, based on the load balancer type. -func NewProvisioner(_ *cloud.Service, source infrav1.LoadBalancerSource) (Provisioner, error) { - switch { - case source.KubeVIP != nil: - return &kubeVIPProvisioner{}, nil - case source.NLB != nil: - return &nlbProvisioner{}, nil +func NewProvisioner(svc *cloud.Service, source infrav1.LoadBalancerSource) (Provisioner, error) { + if source.NLB != nil { + return &nlbProvisioner{svc: svc}, nil } + return nil, fmt.Errorf("unknown load balancer config %#v", source) } diff --git a/internal/loadbalancing/provisioner_kubevip.go b/internal/loadbalancing/provisioner_kubevip.go deleted file mode 100644 index ee3eaf0c..00000000 --- a/internal/loadbalancing/provisioner_kubevip.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2024 IONOS Cloud. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package loadbalancing - -import ( - "context" - - "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" -) - -type kubeVIPProvisioner struct{} - -func (*kubeVIPProvisioner) Provision(_ context.Context, _ *scope.LoadBalancer) (requeue bool, err error) { - panic("implement me") -} - -func (*kubeVIPProvisioner) Destroy(_ context.Context, _ *scope.LoadBalancer) (requeue bool, err error) { - panic("implement me") -} diff --git a/internal/loadbalancing/provisioner_nlb.go b/internal/loadbalancing/provisioner_nlb.go index cfc8d1d4..b95eaa66 100644 --- a/internal/loadbalancing/provisioner_nlb.go +++ b/internal/loadbalancing/provisioner_nlb.go @@ -19,15 +19,31 @@ package loadbalancing import ( "context" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/service/cloud" "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" ) -type nlbProvisioner struct{} +type nlbProvisioner struct { + svc *cloud.Service +} + +// Provision is responsible for creating the Network Load Balancer. 
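+//
+// A rough usage sketch (illustrative only; cloudService, source and lbScope are
+// assumed names, the real controller wiring lives outside this change):
+//
+//	provisioner, err := loadbalancing.NewProvisioner(cloudService, source)
+//	if err != nil {
+//		return err
+//	}
+//	requeue, err := provisioner.Provision(ctx, lbScope)
+//
+// Provision first ensures the public and private LANs exist and that the control plane
+// NICs are attached, then creates the NLB itself and its forwarding rules.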
+func (n *nlbProvisioner) Provision(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + requeue, err = n.svc.ReconcileNLBNetworks(ctx, lb) + if err != nil || requeue { + return requeue, err + } -func (*nlbProvisioner) Provision(_ context.Context, _ *scope.LoadBalancer) (requeue bool, err error) { - panic("implement me") + return n.svc.ReconcileNLB(ctx, lb) } -func (*nlbProvisioner) Destroy(_ context.Context, _ *scope.LoadBalancer) (requeue bool, err error) { - panic("implement me") +func (n *nlbProvisioner) Destroy(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + // Destroy NLB + requeue, err = n.svc.ReconcileNLBDeletion(ctx, lb) + if err != nil || requeue { + return requeue, err + } + + // Destroy LANs + return n.svc.ReconcileNLBNetworksDeletion(ctx, lb) } diff --git a/internal/service/cloud/ipblock_test.go b/internal/service/cloud/ipblock_test.go index 28dbcce4..a4d47272 100644 --- a/internal/service/cloud/ipblock_test.go +++ b/internal/service/cloud/ipblock_test.go @@ -405,7 +405,7 @@ func (s *ipBlockTestSuite) TestReconcileFailoverIPBlockDeletion() { s.mockListIPBlocksCall().Return(&sdk.IpBlocks{Items: &[]sdk.IpBlock{*ipBlock}}, nil).Once() s.mockGetIPBlocksRequestsDeleteCall(exampleIPBlockID).Return(nil, nil).Once() s.mockDeleteIPBlockCall().Return(exampleRequestPath, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() requeue, err := s.service.ReconcileFailoverIPBlockDeletion(s.ctx, s.machineScope) s.NoError(err) @@ -424,7 +424,7 @@ func (s *ipBlockTestSuite) TestReconcileFailoverIPBlockDeletionSkipped() { s.mockGetDatacenterLocationByIDCall(exampleDatacenterID).Return(exampleLocation, nil).Once() s.mockListIPBlocksCall().Return(&sdk.IpBlocks{Items: &[]sdk.IpBlock{*ipBlock}}, nil).Once() s.mockGetIPBlocksRequestsDeleteCall(exampleIPBlockID).Return(nil, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() requeue, err := s.service.ReconcileFailoverIPBlockDeletion(s.ctx, s.machineScope) s.NoError(err) diff --git a/internal/service/cloud/loadbalancer.go b/internal/service/cloud/loadbalancer.go new file mode 100644 index 00000000..43685925 --- /dev/null +++ b/internal/service/cloud/loadbalancer.go @@ -0,0 +1,783 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloud + +import ( + "context" + "errors" + "fmt" + "net/http" + "strconv" + + sdk "github.com/ionos-cloud/sdk-go/v6" + "golang.org/x/sync/errgroup" + "k8s.io/apimachinery/pkg/util/sets" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/ptr" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" +) + +const ( + lanTypePublic string = "in" + lanTypePrivate string = "out" +) + +func (*Service) nlbLANName(lb *infrav1.IonosCloudLoadBalancer, lanType string) string { + return fmt.Sprintf("lan-%s-%s-%s", + lanType, + lb.Namespace, + lb.Name, + ) +} + +func (*Service) nlbNICName(lb *infrav1.IonosCloudLoadBalancer) string { + return fmt.Sprintf("nic-nlb-%s-%s", + lb.Namespace, + lb.Name, + ) +} + +func (*Service) nlbName(lb *infrav1.IonosCloudLoadBalancer) string { + return lb.Namespace + "-" + lb.Name +} + +// ReconcileNLB ensures that a Network Load Balancer will be created and have the correct rules applied. +// +// The Network Load Balancer setup consists of multiple cloud components. +// * Network Load Balancer (NLB) itself +// * Public and Private LAN +// * Forwarding rules +// * Machines receiving the traffic +// NLB connects to two LANs, one for the public network and one for the private network. +// We need to ensure that all machines that need to be load balanced, have a NIC which is connected to the private LAN. +// Once the machines have been provisioned and the NICs are attached, we can define the forwarding rules. +func (s *Service) ReconcileNLB(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + logger := s.logger.WithName("ReconcileNLB") + logger.V(4).Info("Reconciling NLB") + + // Check if NLB needs to be created + + nlb, requeue, err := s.ensureNLB(ctx, lb) + if err != nil || requeue { + return requeue, err + } + + return s.ensureForwardingRules(ctx, lb, nlb) +} + +func (s *Service) ensureNLB( + ctx context.Context, + lb *scope.LoadBalancer, +) (nlb *sdk.NetworkLoadBalancer, requeue bool, err error) { + nlb, request, err := scopedFindResource(ctx, lb, s.getNLB, s.getLatestNLBCreationRequest) + if err != nil { + return nil, true, err + } + + if request != nil && request.isPending() { + // Creation is in progress, we need to wait + return nil, true, nil + } + + if nlb != nil { + if ptr.Deref(nlb.GetId(), "") != "" { + lb.LoadBalancer.SetNLBID(*nlb.GetId()) + } + + if state := getState(nlb); !isAvailable(state) { + // NLB is not ready yet + return nil, true, nil + } + + lb.LoadBalancer.DeleteCurrentRequest() + return nlb, false, nil + } + + ips, err := lb.ResolveEndpoint(ctx) + if err != nil { + return nil, true, err + } + + location, err := s.ionosClient.CreateNLB(ctx, lb.LoadBalancer.Spec.NLB.DatacenterID, sdk.NetworkLoadBalancerProperties{ + Name: ptr.To(s.nlbName(lb.LoadBalancer)), + ListenerLan: ptr.To(lb.LoadBalancer.Status.NLBStatus.PublicLANID), + TargetLan: ptr.To(lb.LoadBalancer.Status.NLBStatus.PrivateLANID), + Ips: ptr.To(ips), + }) + if err != nil { + return nil, true, err + } + + lb.LoadBalancer.SetCurrentRequest(http.MethodPost, sdk.RequestStatusQueued, location) + return nil, true, nil +} + +func (s *Service) getControlPlaneMachines(ctx context.Context, lb *scope.LoadBalancer) (sets.Set[sdk.Server], error) { + // Get CP nodes + cpMachines, err := lb.ClusterScope.ListMachines(ctx, 
client.MatchingLabels{clusterv1.MachineControlPlaneLabel: ""}) + if err != nil { + return nil, err + } + + if len(cpMachines) == 0 { + return sets.New[sdk.Server](), nil + } + + machineSet := sets.New[string]() + for _, machine := range cpMachines { + machineSet.Insert(machine.ExtractServerID()) + } + + servers, err := s.apiWithDepth(3).ListServers(ctx, lb.LoadBalancer.Spec.NLB.DatacenterID) + if err != nil { + return nil, err + } + + cpServers := sets.New[sdk.Server]() + for _, server := range *servers.GetItems() { + if machineSet.Has(*server.GetId()) { + cpServers.Insert(server) + } + } + + return cpServers, nil +} + +func (s *Service) buildExpectedTargets(cpServers []sdk.Server, lb *scope.LoadBalancer) ([]sdk.NetworkLoadBalancerForwardingRuleTarget, error) { + log := s.logger.WithName("buildExpectedTargets") + targets := make([]sdk.NetworkLoadBalancerForwardingRuleTarget, 0, len(cpServers)) + + for _, server := range cpServers { + nics := ptr.Deref(server.GetEntities().GetNics().GetItems(), []sdk.Nic{}) + nic := findNICByName(nics, s.nlbNICName(lb.LoadBalancer)) + if nic == nil { + log.Info("NIC not found for server", "server", *server.GetId()) + continue + } + + ips := ptr.Deref(nic.GetProperties().GetIps(), []string{}) + if len(ips) == 0 { + return nil, fmt.Errorf("could not find IP for NIC %s", *nic.GetId()) + } + + targets = append(targets, sdk.NetworkLoadBalancerForwardingRuleTarget{ + Ip: ptr.To(ips[0]), + Port: ptr.To(lb.Endpoint().Port), + Weight: ptr.To(int32(1)), + }) + } + return targets, nil +} + +func (s *Service) ensureForwardingRules(ctx context.Context, lb *scope.LoadBalancer, nlb *sdk.NetworkLoadBalancer) (requeue bool, err error) { + cpServers, err := s.getControlPlaneMachines(ctx, lb) + if err != nil || cpServers.Len() == 0 { + return false, err + } + + targets, err := s.buildExpectedTargets(cpServers.UnsortedList(), lb) + if err != nil || len(targets) == 0 { + return false, err + } + + ruleName := "control-plane-rule" // TODO make this configurable? 
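+	// Look up the forwarding rule by name: create it if it does not exist yet, using the
+	// listener from the load balancer endpoint and the targets built above, or update it
+	// when its target set no longer matches the control plane NICs.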
+ + rules := ptr.Deref(nlb.GetEntities().GetForwardingrules().GetItems(), []sdk.NetworkLoadBalancerForwardingRule{}) + var existingRule sdk.NetworkLoadBalancerForwardingRule + for _, rule := range rules { + if *rule.GetProperties().GetName() == ruleName { + existingRule = rule + break + } + } + + if !existingRule.HasId() { + expectedRule := sdk.NetworkLoadBalancerForwardingRule{ + Properties: &sdk.NetworkLoadBalancerForwardingRuleProperties{ + Name: ptr.To(ruleName), + Algorithm: ptr.To(lb.LoadBalancer.Spec.NLB.Algorithm), + ListenerIp: ptr.To(lb.Endpoint().Host), + ListenerPort: ptr.To(lb.Endpoint().Port), + Protocol: ptr.To(lb.LoadBalancer.Spec.NLB.Protocol), + Targets: ptr.To(targets), + }, + } + + location, err := s.ionosClient.CreateNLBForwardingRule( + ctx, + lb.LoadBalancer.Spec.NLB.DatacenterID, + *nlb.Id, + expectedRule) + if err != nil { + return true, err + } + + lb.LoadBalancer.SetCurrentRequest(http.MethodPost, sdk.RequestStatusQueued, location) + return true, nil + } + + if !s.targetsValid(*existingRule.GetProperties().GetTargets(), targets) { + existingRule.GetProperties().SetTargets(targets) + location, err := s.ionosClient.UpdateNLBForwardingRule( + ctx, lb.LoadBalancer.Spec.NLB.DatacenterID, + *nlb.GetId(), + *existingRule.GetId(), + existingRule, + ) + if err != nil { + return true, err + } + + lb.LoadBalancer.SetCurrentRequest(http.MethodPut, sdk.RequestStatusQueued, location) + // Update the rule with the new targets + return true, nil + } + + return false, nil +} + +func (*Service) targetsValid(existing, expected []sdk.NetworkLoadBalancerForwardingRuleTarget) bool { + if len(existing) != len(expected) { + return false + } + + existingSet := sets.New[string]() + + for _, r := range existing { + existingSet.Insert(*r.GetIp()) + } + + for _, target := range expected { + if !existingSet.Has(*target.GetIp()) { + return false + } + } + + return true +} + +func (s *Service) getNLB(ctx context.Context, lb *scope.LoadBalancer) (nlb *sdk.NetworkLoadBalancer, err error) { + const apiDepth = 10 + if nlbID := lb.LoadBalancer.GetNLBID(); nlbID != "" { + nlb, err = s.apiWithDepth(apiDepth).GetNLB(ctx, lb.LoadBalancer.Spec.NLB.DatacenterID, nlbID) + if ignoreNotFound(err) != nil { + return nil, err + } + + if nlb != nil { + return nlb, nil + } + } + + // We don't have an ID, we need to find the NLB by name + nlbs, err := s.apiWithDepth(apiDepth).ListNLBs(ctx, lb.LoadBalancer.Spec.NLB.DatacenterID) + if err != nil { + return nil, err + } + + for _, nlb := range ptr.Deref(nlbs.GetItems(), []sdk.NetworkLoadBalancer{}) { + if nlb.Properties.HasName() && *nlb.Properties.Name == s.nlbName(lb.LoadBalancer) { + return &nlb, nil + } + } + + // return not found error + return nil, err +} + +func (*Service) nlbsURL(datacenterID string) string { + return fmt.Sprintf("/datacenters/%s/networkloadbalancers", datacenterID) +} + +func (*Service) nlbURL(datacenterID, nlbID string) string { + return fmt.Sprintf("/datacenters/%s/networkloadbalancers/%s", datacenterID, nlbID) +} + +// ReconcileNLBDeletion ensures the deletion of the NLB. 
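+//
+// It waits for any pending NLB creation or deletion request to finish before looking up
+// the NLB. If the NLB is already gone, the current request is cleared; otherwise a
+// deletion request is issued and the reconciliation is requeued until it completes.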
+func (s *Service) ReconcileNLBDeletion(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + log := s.logger.WithName("ReconcileNLBDeletion") + log.V(4).Info("Reconciling NLB deletion") + + // Check if there is an ongoing creation request + + request, err := s.getLatestNLBCreationRequest(ctx, lb) + if err != nil { + return true, fmt.Errorf("could not get latest NLB creation request: %w", err) + } + + if request != nil && request.isPending() { + log.Info("Found pending NLB creation request. Waiting for it to be finished") + return true, nil + } + + // Check if there is an ongoing deletion request + request, err = s.getLatestNLBDeletionRequest(ctx, lb) + if err != nil { + return true, fmt.Errorf("could not get latest NLB deletion request: %w", err) + } + + if request != nil && request.isPending() { + log.Info("Found pending NLB deletion request. Waiting for it to be finished") + return true, nil + } + + nlb, err := s.getNLB(ctx, lb) + if err != nil { + return true, err + } + + if nlb == nil { + // NLB was already deleted + lb.LoadBalancer.DeleteCurrentRequest() + return false, nil + } + + // Delete the NLB + path, err := s.ionosClient.DeleteNLB(ctx, lb.LoadBalancer.Spec.NLB.DatacenterID, *nlb.GetId()) + if err != nil { + return true, err + } + + lb.LoadBalancer.SetCurrentRequest(http.MethodDelete, sdk.RequestStatusQueued, path) + return true, nil +} + +// ReconcileNLBNetworks reconciles the networks for the corresponding NLB. +// +// The following networks need to be created for a basic NLB configuration: +// * Incoming public LAN. This will be used to expose the NLB to the internet. +// * Outgoing private LAN. This LAN will be connected with the NICs of control plane nodes. +func (s *Service) ReconcileNLBNetworks(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + log := s.logger.WithName("ReconcileNLBNetworks") + log.V(4).Info("Reconciling LoadBalancer Networks") + + if requeue, err := s.reconcileIncomingLAN(ctx, lb); err != nil || requeue { + return requeue, err + } + + if requeue, err := s.reconcileOutgoingLAN(ctx, lb); err != nil || requeue { + return requeue, err + } + + if requeue, err := s.reconcileControlPlaneLAN(ctx, lb); err != nil || requeue { + return requeue, err + } + + log.V(4).Info("Successfully reconciled LoadBalancer Networks") + return false, nil +} + +// ReconcileNLBNetworksDeletion handles the deletion of the networks for the corresponding NLB. +func (s *Service) ReconcileNLBNetworksDeletion(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + log := s.logger.WithName("ReconcileNLBNetworksDeletion") + log.V(4).Info("Reconciling LoadBalancer Networks deletion") + + cpMachines, err := lb.ClusterScope.ListMachines(ctx, client.MatchingLabels{clusterv1.MachineControlPlaneLabel: ""}) + if err != nil { + return true, err + } + + if len(cpMachines) > 0 { + log.Info("Control plane machines still exist. 
Waiting for machines to be deleted") + return true, nil + } + + if requeue, err := s.reconcileOutgoingLANDeletion(ctx, lb); err != nil || requeue { + return requeue, err + } + + if requeue, err := s.reconcileIncomingLANDeletion(ctx, lb); err != nil || requeue { + return requeue, err + } + + log.V(4).Info("Successfully reconciled LoadBalancer Networks deletion") + return false, nil +} + +func (s *Service) reconcileIncomingLAN(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + lan, requeue, err := s.reconcileLoadBalancerLAN(ctx, lb, lanTypePublic) + if err != nil || requeue { + return requeue, err + } + + if lan != nil { + if err := lb.LoadBalancer.SetPublicLANID(ptr.Deref(lan.GetId(), "")); err != nil { + return true, err + } + } + + return false, nil +} + +func (s *Service) reconcileOutgoingLAN(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + lan, requeue, err := s.reconcileLoadBalancerLAN(ctx, lb, lanTypePrivate) + if err != nil || requeue { + return requeue, err + } + + if lan != nil { + if err := lb.LoadBalancer.SetPrivateLANID(ptr.Deref(lan.GetId(), "")); err != nil { + return true, err + } + } + + return false, nil +} + +func (s *Service) reconcileLoadBalancerLAN( + ctx context.Context, + lb *scope.LoadBalancer, + lanType string, +) (lan *sdk.Lan, requeue bool, err error) { + log := s.logger.WithName("createLoadBalancerLAN") + log.Info("Reconciling LoadBalancer LAN", "type", lanType) + + var ( + datacenterID = lb.LoadBalancer.Spec.NLB.DatacenterID + lanName = s.nlbLANName(lb.LoadBalancer, lanType) + ) + + lan, requeue, err = s.findLoadBalancerLANByName(ctx, lb, lanType) + if err != nil || requeue { + return nil, requeue, err + } + + if lan != nil { + if state := getState(lan); !isAvailable(state) { + log.Info("LAN is not yet available. Waiting for it to be available", "state", state) + return nil, true, nil + } + + log.V(4).Info("LAN is available", "ID", *lan.GetId(), "type", lanType) + return lan, false, nil + } + + log.Info("No LAN found, creating LAN for NLB", "type", lanType) + path, err := s.createLoadBalancerLAN(ctx, lanName, datacenterID, lanType == lanTypePublic) + if err != nil { + return nil, false, fmt.Errorf("could not create incoming LAN: %w", err) + } + + // LAN creation is usually fast, so we can wait for it to be finished. 
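+	// If the wait fails or times out, the request path is stored in the status so that the
+	// next reconciliation resumes the pending request instead of creating a second LAN.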
+ if err := s.ionosClient.WaitForRequest(ctx, path); err != nil { + lb.LoadBalancer.SetCurrentRequest(http.MethodPost, sdk.RequestStatusQueued, path) + return nil, true, err + } + + lan, requeue, err = s.findLoadBalancerLANByName(ctx, lb, lanType) + if err != nil || requeue { + return nil, requeue, err + } + + return lan, false, nil +} + +func (s *Service) createLoadBalancerLAN(ctx context.Context, lanName, datacenterID string, public bool) (requestPath string, err error) { + log := s.logger.WithName("createLoadBalancerLAN") + log.Info("Creating LoadBalancer LAN", "name", lanName, "datacenterID", datacenterID, "public", public) + + return s.ionosClient.CreateLAN(ctx, datacenterID, sdk.LanProperties{ + Name: ptr.To(lanName), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(public), + }) +} + +func (s *Service) reconcileIncomingLANDeletion(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + return s.reconcileLoadBalancerLANDeletion(ctx, lb, lanTypePublic) +} + +func (s *Service) reconcileOutgoingLANDeletion(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + return s.reconcileLoadBalancerLANDeletion(ctx, lb, lanTypePrivate) +} + +func (s *Service) reconcileLoadBalancerLANDeletion(ctx context.Context, lb *scope.LoadBalancer, lanType string) (requeue bool, err error) { + log := s.logger.WithName("reconcileLoadBalancerLANDeletion") + + lanID := lb.LoadBalancer.GetPublicLANID() + if lanType == lanTypePrivate { + lanID = lb.LoadBalancer.GetPrivateLANID() + } + + log.V(4).Info("Deleting LAN for NLB", "ID", lanID) + + // check if the LAN exists or if there is a pending creation request + lan, requeue, err := s.findLoadBalancerLAN(ctx, lb, lanID, lanType) + if err != nil || requeue { + return requeue, err + } + + if lan == nil { + // LAN is already deleted + lb.LoadBalancer.DeleteCurrentRequest() + return false, nil + } + + // check if there is a pending deletion request for the LAN + request, err := s.getLatestLoadBalancerLANDeletionRequest(ctx, lb, lanID) + if err != nil { + return false, fmt.Errorf("could not check for pending LAN deletion request: %w", err) + } + + if request != nil && request.isPending() { + log.Info("Found pending LAN deletion request. 
Waiting for it to be finished") + return true, nil + } + + return s.deleteAndWaitForLAN(ctx, lb, lanID) +} + +func (s *Service) deleteAndWaitForLAN(ctx context.Context, lb *scope.LoadBalancer, lanID string) (requeue bool, err error) { + path, err := s.deleteLoadBalancerLAN(ctx, lb.LoadBalancer.Spec.NLB.DatacenterID, lanID) + if err != nil { + return true, err + } + + if err := s.ionosClient.WaitForRequest(ctx, path); err != nil { + lb.LoadBalancer.SetCurrentRequest(http.MethodDelete, sdk.RequestStatusQueued, path) + return true, err + } + + return false, nil +} + +func (s *Service) findLoadBalancerLAN( + ctx context.Context, + lb *scope.LoadBalancer, + lanID, lanType string, +) (lan *sdk.Lan, requeue bool, err error) { + if lanID != "" { + return s.findLoadBalancerLANByID(ctx, lb, lanID) + } + + return s.findLoadBalancerLANByName(ctx, lb, lanType) +} + +func (s *Service) findLoadBalancerLANByID( + ctx context.Context, + lb *scope.LoadBalancer, + lanID string, +) (lan *sdk.Lan, requeue bool, err error) { + log := s.logger.WithName("findLoadBalancerLANByID") + + datacenterID := lb.LoadBalancer.Spec.NLB.DatacenterID + + lan, request, err := scopedFindResource( + ctx, lb, + s.getLANByIDFunc(datacenterID, lanID), + s.getLatestLoadBalancerLANCreationRequest(lanID)) + if err != nil { + return nil, true, fmt.Errorf("could not find or create incoming LAN: %w", err) + } + + if request != nil && request.isPending() { + log.Info("Request for incoming LAN is pending. Waiting for it to be finished") + return nil, true, nil + } + + return lan, false, nil +} + +func (s *Service) findLoadBalancerLANByName(ctx context.Context, lb *scope.LoadBalancer, lanType string) (lan *sdk.Lan, requeue bool, err error) { + log := s.logger.WithName("findLoadBalancerLANByName") + + datacenterID := lb.LoadBalancer.Spec.NLB.DatacenterID + lanName := s.nlbLANName(lb.LoadBalancer, lanType) + + lan, request, err := scopedFindResource( + ctx, lb, + s.getLANByNameFunc(datacenterID, lanName), + s.getLatestLoadBalancerLANCreationRequest(lanType)) + if err != nil { + return nil, true, fmt.Errorf("could not find or create incoming LAN: %w", err) + } + + if request != nil && request.isPending() { + log.Info("Request for incoming LAN is pending. Waiting for it to be finished") + return nil, true, nil + } + + return lan, false, nil +} + +func (s *Service) deleteLoadBalancerLAN(ctx context.Context, datacenterID, lanID string) (requestPath string, err error) { + log := s.logger.WithName("createLoadBalancerLAN") + log.Info("Deleting LoadBalancer LAN", "datacenterID", datacenterID, "lanID", lanID) + + return s.ionosClient.DeleteLAN(ctx, datacenterID, lanID) +} + +func (s *Service) getLANByNameFunc(datacenterID, lanName string) func(context.Context, *scope.LoadBalancer) (*sdk.Lan, error) { + return func(ctx context.Context, _ *scope.LoadBalancer) (*sdk.Lan, error) { + // check if the LAN exists + depth := int32(2) // for listing the LANs with their number of NICs + lans, err := s.apiWithDepth(depth).ListLANs(ctx, datacenterID) + if err != nil { + return nil, fmt.Errorf("could not list LANs in data center %s: %w", datacenterID, err) + } + + var ( + lanCount = 0 + foundLAN *sdk.Lan + ) + + for _, l := range ptr.Deref(lans.GetItems(), []sdk.Lan{}) { + if l.Properties.HasName() && *l.Properties.Name == lanName { + foundLAN = &l + lanCount++ + } + + // If there are multiple LANs with the same name, we should return an error. + // Our logic won't be able to proceed as we cannot select the correct LAN. 
+ if lanCount > 1 { + return nil, fmt.Errorf("found multiple LANs with the name: %s", lanName) + } + } + + return foundLAN, nil + } +} + +func findNICByName(nics []sdk.Nic, expectedNICName string) *sdk.Nic { + for _, nic := range nics { + if ptr.Deref(nic.GetProperties().GetName(), "") == expectedNICName { + return &nic + } + } + + return nil +} + +func (s *Service) reconcileControlPlaneLAN(ctx context.Context, lb *scope.LoadBalancer) (requeue bool, err error) { + if lb.LoadBalancer.GetPrivateLANID() == "" { + return true, errors.New("private LAN ID is not set") + } + + cpMachines, err := lb.ClusterScope.ListMachines(ctx, client.MatchingLabels{clusterv1.MachineControlPlaneLabel: ""}) + if err != nil || len(cpMachines) == 0 { + return false, err + } + + expectedNICName := s.nlbNICName(lb.LoadBalancer) + + errGrp, ctx := errgroup.WithContext(ctx) + errGrp.SetLimit(len(cpMachines)) + + for _, machine := range cpMachines { + datacenterID := machine.Spec.DatacenterID + serverID := machine.ExtractServerID() + + if creationPending, err := s.isNICCreationPending(ctx, datacenterID, serverID, expectedNICName); err != nil { + // only return if there is an error. + return true, err + } else if creationPending { + requeue = true + continue + } + + server, err := s.getServerByServerID(ctx, datacenterID, serverID) + if err != nil { + return true, err + } + + if state := getState(server); !isAvailable(state) { + requeue = true + continue + } + + nics := ptr.Deref(server.GetEntities().GetNics().GetItems(), []sdk.Nic{}) + nlbNIC := findNICByName(nics, expectedNICName) + if nlbNIC != nil { + // NIC already exists + continue + } + + lanID, err := strconv.ParseInt(lb.LoadBalancer.GetPrivateLANID(), 10, 32) + if err != nil { + return true, err + } + + newNICProps := sdk.NicProperties{ + Name: ptr.To(expectedNICName), + Dhcp: ptr.To(true), + Lan: ptr.To(int32(lanID)), + } + + errGrp.Go(func() error { + return s.createAndAttachNIC(ctx, datacenterID, serverID, newNICProps) + }) + } + + err = errGrp.Wait() + // If any NIC has a pending creation request, we need to requeue + return requeue, err +} + +func (s *Service) isNICCreationPending(ctx context.Context, datacenterID, serverID, expectedName string) (bool, error) { + // check if there is an ongoing request for the server + request, err := s.getLatestNICCreateRequest(ctx, datacenterID, serverID, expectedName) + if err != nil { + return true, err + } + + if request != nil && request.isPending() { + return true, nil + } + + return false, nil +} + +func (s *Service) getLANByIDFunc(datacenterID, lanID string) func(context.Context, *scope.LoadBalancer) (*sdk.Lan, error) { + return func(ctx context.Context, _ *scope.LoadBalancer) (*sdk.Lan, error) { + return s.ionosClient.GetLAN(ctx, datacenterID, lanID) + } +} + +func (s *Service) getLatestLoadBalancerLANCreationRequest(lanType string) func(context.Context, *scope.LoadBalancer) (*requestInfo, error) { + return func(ctx context.Context, lb *scope.LoadBalancer) (*requestInfo, error) { + return s.getLatestLANRequestByMethod(ctx, http.MethodPost, s.lansURL(lb.LoadBalancer.Spec.NLB.DatacenterID), + matchByName[*sdk.Lan, *sdk.LanProperties](s.nlbLANName(lb.LoadBalancer, lanType))) + } +} + +func (s *Service) getLatestLoadBalancerLANDeletionRequest(ctx context.Context, lb *scope.LoadBalancer, lanID string) (*requestInfo, error) { + return s.getLatestLANRequestByMethod(ctx, http.MethodDelete, s.lanURL(lb.LoadBalancer.Spec.NLB.DatacenterID, lanID)) +} + +func (s *Service) getLatestNLBCreationRequest(ctx context.Context, lb 
*scope.LoadBalancer) (*requestInfo, error) { + return s.getLatestNLBRequestByMethod(ctx, http.MethodPost, s.nlbsURL(lb.LoadBalancer.Spec.NLB.DatacenterID), + matchByName[*sdk.NetworkLoadBalancer, *sdk.NetworkLoadBalancerProperties](s.nlbName(lb.LoadBalancer))) +} + +func (s *Service) getLatestNLBDeletionRequest(ctx context.Context, lb *scope.LoadBalancer) (*requestInfo, error) { + return s.getLatestNLBRequestByMethod(ctx, http.MethodDelete, s.nlbURL(lb.LoadBalancer.Spec.NLB.DatacenterID, lb.LoadBalancer.Status.NLBStatus.ID)) +} + +func (s *Service) getLatestNLBRequestByMethod( + ctx context.Context, + method, path string, + matcher ...matcherFunc[*sdk.NetworkLoadBalancer], +) (*requestInfo, error) { + return getMatchingRequest( + ctx, s, + method, + path, + matcher...) +} diff --git a/internal/service/cloud/loadbalancer_test.go b/internal/service/cloud/loadbalancer_test.go new file mode 100644 index 00000000..cb6b7481 --- /dev/null +++ b/internal/service/cloud/loadbalancer_test.go @@ -0,0 +1,679 @@ +/* +Copyright 2024 IONOS Cloud. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloud + +import ( + "fmt" + "net/http" + "net/netip" + "testing" + + "github.com/google/uuid" + sdk "github.com/ionos-cloud/sdk-go/v6" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + infrav1 "github.com/ionos-cloud/cluster-api-provider-ionoscloud/api/v1alpha1" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/ionoscloud/clienttest" + "github.com/ionos-cloud/cluster-api-provider-ionoscloud/internal/util/ptr" +) + +type nlbSuite struct { + ServiceTestSuite +} + +func TestNLBSuite(t *testing.T) { + suite.Run(t, new(nlbSuite)) +} + +func (s *nlbSuite) TestNLBLANName() { + nlbLANName := s.service.nlbLANName(s.infraLoadBalancer, lanTypePublic) + expected := "lan-in-" + s.infraLoadBalancer.Namespace + "-" + s.infraLoadBalancer.Name + s.Equal(expected, nlbLANName) +} + +func (s *nlbSuite) TestNLBNICName() { + nlbNICName := s.service.nlbNICName(s.infraLoadBalancer) + expected := "nic-nlb-" + s.infraLoadBalancer.Namespace + "-" + s.infraLoadBalancer.Name + s.Equal(expected, nlbNICName) +} + +func (s *nlbSuite) TestNLBName() { + nlbName := s.service.nlbName(s.infraLoadBalancer) + expected := s.infraLoadBalancer.Namespace + "-" + s.infraLoadBalancer.Name + s.Equal(expected, nlbName) +} + +func (s *nlbSuite) TestReconcileNLBCreateNLB() { + status := s.loadBalancerScope.LoadBalancer.Status.NLBStatus + + status.PublicLANID = 2 + status.PrivateLANID = 3 + + s.mockListNLBsCall().Return(nil, nil).Once() + s.mockGetNLBCreationRequestCall().Return(nil, nil).Once() + s.mockCreateNLBCall().Return("test/nlb/request/path", nil).Once() + + requeue, err := s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) + s.NotNil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) +} + +func (s *nlbSuite) TestReconcileNLBRequestIsPending() { + s.mockListNLBsCall().Return(nil, 
nil).Once() + s.mockGetNLBCreationRequestCall().Return([]sdk.Request{s.exampleNLBPostRequest()}, nil) + + requeue, err := s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileNLBNoControlPlaneMachines() { + s.setupExistingNLBScenario(s.defaultNLB()) + + requeue, err := s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + + s.NoError(err) + s.False(requeue) + s.Nil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) +} + +func (s *nlbSuite) TestReconcileNLBControlPlaneMachinesAvailableNoNICs() { + s.setupExistingNLBScenario(s.defaultNLB()) + + machines := s.createControlPlaneMachines(0, 3) + for _, m := range machines { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + s.mockListServersCall(exampleDatacenterID).Return( + &sdk.Servers{Items: ptr.To(s.machinesToServers(machines))}, nil, + ).Once() + + requeue, err := s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + + s.NoError(err) + s.False(requeue) + s.Nil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) +} + +func (s *nlbSuite) TestReconcileNLBControlPlaneMachinesAvailable() { + s.setupExistingNLBScenario(s.defaultNLB()) + + machines := s.createControlPlaneMachines(0, 3) + for _, m := range machines { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + s.mockListServersCall(exampleDatacenterID).Return( + &sdk.Servers{Items: ptr.To(s.machinesToServers( + machines, + func(server *sdk.Server) { + server.SetEntities(sdk.ServerEntities{ + Nics: &sdk.Nics{ + Items: &[]sdk.Nic{{ + Properties: &sdk.NicProperties{ + Name: ptr.To(s.service.nlbNICName(s.infraLoadBalancer)), + Ips: ptr.To([]string{"203.0.113.10"}), + Lan: ptr.To(int32(2)), + }, + }}, + }, + }) + }))}, nil, + ).Once() + + s.mockCreateNLBForwardingRuleCall(exampleDatacenterID, exampleNLBID).Return("path/to/nlb/", nil).Once() + requeue, err := s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + + s.NoError(err) + s.True(requeue) + s.NotNil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) +} + +func (s *nlbSuite) TestReconcileNLBUpdateForwardingRules() { + nlb := s.defaultNLB() + s.setupExistingNLBScenario(nlb) + + machinesFirstCall := s.createControlPlaneMachines(0, 2) + machinesSecondCall := s.createControlPlaneMachines(2, 2) + + for _, m := range machinesFirstCall { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + ipAddress := new(string) + *ipAddress = "203.0.113.10" + + applyLBNICWithIP := func(server *sdk.Server) { + oldIP, err := netip.ParseAddr(*ipAddress) + s.NoError(err) + nextIP := oldIP.Next() + + server.SetEntities(sdk.ServerEntities{ + Nics: &sdk.Nics{ + Items: &[]sdk.Nic{{ + Properties: &sdk.NicProperties{ + Name: ptr.To(s.service.nlbNICName(s.infraLoadBalancer)), + Ips: ptr.To([]string{oldIP.String()}), + Lan: ptr.To(int32(2)), + }, + }}, + }, + }) + + *ipAddress = nextIP.String() + } + + initialServers := s.machinesToServers(machinesFirstCall, applyLBNICWithIP) + + s.mockListServersCall(exampleDatacenterID).Return(&sdk.Servers{ + Items: ptr.To(initialServers), + }, nil).Once() + s.mockCreateNLBForwardingRuleCall(exampleDatacenterID, exampleNLBID).Return("path/to/nlb/", nil).Once() + + requeue, err := s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) + + // We need to pretend we have a rule now + rule := sdk.NetworkLoadBalancerForwardingRule{ + Id: ptr.To(exampleForwardingRuleID), + Properties: &sdk.NetworkLoadBalancerForwardingRuleProperties{ + Name: ptr.To("control-plane-rule"), + Algorithm: ptr.To("ROUND_ROBIN"), + Protocol: ptr.To("TCP"), 
+ }, + } + + rules := sdk.NetworkLoadBalancerForwardingRules{ + Items: &[]sdk.NetworkLoadBalancerForwardingRule{rule}, + } + + targets := make([]sdk.NetworkLoadBalancerForwardingRuleTarget, 0, len(initialServers)) + + for _, s := range initialServers { + nicIP := (*(*s.Entities.GetNics().GetItems())[0].GetProperties().GetIps())[0] + + targets = append(targets, sdk.NetworkLoadBalancerForwardingRuleTarget{ + Ip: ptr.To(nicIP), + }) + } + + rule.GetProperties().SetTargets(targets) + nlb.Entities.SetForwardingrules(rules) + + for _, m := range machinesSecondCall { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + s.setupExistingNLBScenario(nlb) + + finalServers := s.machinesToServers(machinesSecondCall, applyLBNICWithIP) + allServers := append(initialServers, finalServers...) + + s.mockListServersCall(exampleDatacenterID).Return(&sdk.Servers{ + Items: ptr.To(allServers), + }, nil).Once() + + s.mockUpdateNLBForwardingRuleCall(exampleDatacenterID, exampleNLBID, exampleForwardingRuleID, 4). + Return("/path/to/update", nil).Once() + + requeue, err = s.service.ReconcileNLB(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) + + s.NotNil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) +} + +func (s *nlbSuite) TestEnsureNLBAvailable() { + s.loadBalancerScope.LoadBalancer.Status.NLBStatus.ID = exampleNLBID + s.loadBalancerScope.LoadBalancer.Status.CurrentRequest = &infrav1.ProvisioningRequest{} + s.mockGetNLBCall(exampleNLBID).Return(s.defaultNLB(), nil).Once() + + nlb, requeue, err := s.service.ensureNLB(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.False(requeue) + s.Nil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) + s.NotNil(nlb) +} + +func (s *nlbSuite) TestReconcileNLBDeletionRequestPending() { + s.mockGetNLBCreationRequestCall().Return([]sdk.Request{s.exampleNLBPostRequest()}, nil).Once() + + requeue, err := s.service.ReconcileNLBDeletion(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileNLBDeletionDeletionInProgress() { + s.mockGetNLBCreationRequestCall().Return(nil, nil).Once() + s.mockGetNLBDeletionRequestCall(exampleNLBID).Return([]sdk.Request{s.exampleNLBDeleteRequest(exampleNLBID)}, nil) + s.loadBalancerScope.LoadBalancer.SetNLBID(exampleNLBID) + + requeue, err := s.service.ReconcileNLBDeletion(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileNLBDeletion() { + s.setupExistingNLBScenario(s.defaultNLB()) + s.mockGetNLBCreationRequestCall().Return(nil, nil).Once() + s.mockGetNLBDeletionRequestCall(exampleNLBID).Return(nil, nil) + + s.mockDeleteNLBCall(exampleNLBID).Return("path/to/deletion", nil).Once() + + requeue, err := s.service.ReconcileNLBDeletion(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) + s.NotNil(s.loadBalancerScope.LoadBalancer.Status.CurrentRequest) +} + +func (s *nlbSuite) TestReconcileLoadBalancerNetworksCreateIncomingAndOutgoing() { + s.mockListLANsCall(s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID).Return(nil, nil).Once() + s.mockGetLANCreationRequestsCall(s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID).Return(nil, nil).Twice() + + s.mockCreateNLBLANCall(sdk.LanProperties{ + Name: ptr.To(s.service.nlbLANName(s.infraLoadBalancer, lanTypePublic)), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(true), + }).Return("path/to/lan", nil).Once() + + const ( + incomingLANID = "2" + outgoingLANID = "3" + ) + + s.mockWaitForRequestCall("path/to/lan").Return(nil).Twice() + + firstLans := []sdk.Lan{{ + Id: 
ptr.To(incomingLANID), + Metadata: &sdk.DatacenterElementMetadata{State: ptr.To(sdk.Available)}, + Properties: &sdk.LanProperties{ + Name: ptr.To(s.service.nlbLANName(s.infraLoadBalancer, lanTypePublic)), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(true), + }, + }} + + call := s.mockListLANsCall(s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID).Return(&sdk.Lans{ + Items: &firstLans, + }, nil).Twice() + + s.mockCreateNLBLANCall(sdk.LanProperties{ + Name: ptr.To(s.service.nlbLANName(s.infraLoadBalancer, lanTypePrivate)), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(false), + }).Return("path/to/lan", nil).Once().NotBefore(call) + + secondLANs := append(firstLans, sdk.Lan{ + Id: ptr.To(outgoingLANID), + Metadata: &sdk.DatacenterElementMetadata{State: ptr.To(sdk.Available)}, + Properties: &sdk.LanProperties{ + Name: ptr.To(s.service.nlbLANName(s.infraLoadBalancer, lanTypePrivate)), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(false), + }, + }) + + s.mockListLANsCall(s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID).Return(&sdk.Lans{ + Items: &secondLANs, + }, nil).Once() + + requeue, err := s.service.ReconcileNLBNetworks(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.False(requeue) +} + +func (s *nlbSuite) TestReconcileLoadBalancerNetworksLANsExist() { + const ( + incomingLANID = "2" + outgoingLANID = "3" + ) + + lans := []sdk.Lan{{ + Id: ptr.To(incomingLANID), + Metadata: &sdk.DatacenterElementMetadata{State: ptr.To(sdk.Available)}, + Properties: &sdk.LanProperties{ + Name: ptr.To(s.service.nlbLANName(s.infraLoadBalancer, lanTypePublic)), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(true), + }, + }, { + Id: ptr.To(outgoingLANID), + Metadata: &sdk.DatacenterElementMetadata{State: ptr.To(sdk.Busy)}, + Properties: &sdk.LanProperties{ + Name: ptr.To(s.service.nlbLANName(s.infraLoadBalancer, lanTypePrivate)), + Ipv6CidrBlock: ptr.To("AUTO"), + Public: ptr.To(false), + }, + }} + s.mockListLANsCall(s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID).Return(&sdk.Lans{Items: &lans}, nil).Twice() + + requeue, err := s.service.ReconcileNLBNetworks(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileControlPlaneLAN() { + machines := s.createControlPlaneMachines(0, 2) + servers := s.machinesToServers(machines) + s.NoError(s.loadBalancerScope.LoadBalancer.SetPrivateLANID("2")) + + for _, m := range machines { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + for _, srv := range servers { + s.mockGetNICCreationRequestCall(*srv.GetId()).Return(nil, nil).Once() + s.mockGetServerCall(s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID, *srv.GetId()). + Return(&srv, nil). 
+ Once() + + s.mockCreateNICCall(*srv.GetId()).Return("path/to/nic", nil).Once() + s.mockWaitForRequestCall("path/to/nic").Return(nil).Once() + } + + requeue, err := s.service.reconcileControlPlaneLAN(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.False(requeue) +} + +func (s *nlbSuite) TestReconcileControlPlaneLANNICCreationPending() { + machines := s.createControlPlaneMachines(0, 3) + servers := s.machinesToServers(machines, s.applyDefaultNIC) + s.NoError(s.loadBalancerScope.LoadBalancer.SetPrivateLANID("2")) + + for _, m := range machines { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + datacenterID := s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID + s.mockGetNICCreationRequestCall(*servers[0].GetId()).Return([]sdk.Request{(s.exampleNICPPostRequest(*servers[0].GetId()))}, nil).Once() + + for _, srv := range servers[1:] { + s.mockGetNICCreationRequestCall(*srv.GetId()).Return(nil, nil).Once() + s.mockGetServerCall(datacenterID, *srv.GetId()). + Return(&srv, nil). + Once() + } + + requeue, err := s.service.reconcileControlPlaneLAN(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileControlPlaneLANServerUnavailable() { + machines := s.createControlPlaneMachines(0, 3) + servers := s.machinesToServers(machines, s.applyDefaultNIC) + s.NoError(s.loadBalancerScope.LoadBalancer.SetPrivateLANID("2")) + + for _, m := range machines { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + datacenterID := s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID + servers[0].Metadata.SetState(sdk.Busy) + + for _, srv := range servers { + s.mockGetNICCreationRequestCall(*srv.GetId()).Return(nil, nil).Once() + s.mockGetServerCall(datacenterID, *srv.GetId()). + Return(&srv, nil). + Once() + } + + requeue, err := s.service.reconcileControlPlaneLAN(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileNLBNetworksDeletionExistingMachine() { + machines := s.createControlPlaneMachines(0, 1) + + for _, m := range machines { + s.NoError(s.k8sClient.Create(s.ctx, &m)) + } + + requeue, err := s.service.ReconcileNLBNetworksDeletion(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileNLBNetworksDeletionDeletionPending() { + const privateLANID = "2" + + s.NoError(s.loadBalancerScope.LoadBalancer.SetPrivateLANID(privateLANID)) + s.mockGetLANCall(privateLANID).Return(&sdk.Lan{Id: ptr.To(privateLANID)}, nil).Once() + s.mockGetLANDeletionRequestCall(privateLANID).Return(s.exampleLANDeleteRequest(privateLANID, sdk.RequestStatusQueued), nil).Once() + + requeue, err := s.service.ReconcileNLBNetworksDeletion(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.True(requeue) +} + +func (s *nlbSuite) TestReconcileNLBNetworksDeletionDeletion() { + const ( + privateLANID = "2" + publicLANID = "3" + ) + + s.NoError(s.loadBalancerScope.LoadBalancer.SetPrivateLANID(privateLANID)) + s.NoError(s.loadBalancerScope.LoadBalancer.SetPublicLANID(publicLANID)) + + s.mockGetLANCall(privateLANID).Return(&sdk.Lan{Id: ptr.To(privateLANID)}, nil).Once() + s.mockGetLANCall(publicLANID).Return(&sdk.Lan{Id: ptr.To(publicLANID)}, nil).Once() + + s.mockGetLANDeletionRequestCall(privateLANID).Return(nil, nil).Once() + s.mockGetLANDeletionRequestCall(publicLANID).Return(nil, nil).Once() + + s.mockDeleteLANCall(publicLANID).Return("path/to/deletion", nil).Once() + s.mockDeleteLANCall(privateLANID).Return("path/to/deletion", nil).Once() + + s.mockWaitForRequestCall("path/to/deletion").Return(nil).Twice() 
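+	// No control plane machines exist and both LAN deletions complete while waiting for
+	// their requests, so the reconciliation is expected to finish without a requeue.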
+ + requeue, err := s.service.ReconcileNLBNetworksDeletion(s.ctx, s.loadBalancerScope) + s.NoError(err) + s.False(requeue) +} + +func (s *nlbSuite) setupExistingNLBScenario(nlb *sdk.NetworkLoadBalancer) { + s.T().Helper() + + s.loadBalancerScope.LoadBalancer.Status.NLBStatus.ID = exampleNLBID + s.loadBalancerScope.LoadBalancer.Status.CurrentRequest = &infrav1.ProvisioningRequest{} + s.mockGetNLBCall(exampleNLBID).Return(nlb, nil).Once() +} + +func (s *nlbSuite) mockGetNLBCall(nlbID string) *clienttest.MockClient_GetNLB_Call { + return s.ionosClient.EXPECT().GetNLB(mock.MatchedBy(nonNilCtx), s.infraLoadBalancer.Spec.NLB.DatacenterID, nlbID) +} + +func (s *nlbSuite) mockListNLBsCall() *clienttest.MockClient_ListNLBs_Call { + return s.ionosClient.EXPECT().ListNLBs(mock.MatchedBy(nonNilCtx), s.infraLoadBalancer.Spec.NLB.DatacenterID) +} + +func (s *nlbSuite) mockCreateNLBCall() *clienttest.MockClient_CreateNLB_Call { + return s.ionosClient.EXPECT().CreateNLB(mock.MatchedBy(nonNilCtx), + s.infraLoadBalancer.Spec.NLB.DatacenterID, + mock.Anything, + ) +} + +func (s *nlbSuite) mockDeleteNLBCall(nlbID string) *clienttest.MockClient_DeleteNLB_Call { + return s.ionosClient.EXPECT().DeleteNLB(mock.MatchedBy(nonNilCtx), s.infraLoadBalancer.Spec.NLB.DatacenterID, nlbID) +} + +func (s *nlbSuite) mockCreateNLBForwardingRuleCall(datacenterID, nlbID string) *clienttest.MockClient_CreateNLBForwardingRule_Call { + return s.ionosClient.EXPECT().CreateNLBForwardingRule(mock.MatchedBy(nonNilCtx), datacenterID, nlbID, mock.Anything) +} + +func (s *nlbSuite) mockUpdateNLBForwardingRuleCall(datacenterID, nlbID, ruleID string, targetLen int) *clienttest.MockClient_UpdateNLBForwardingRule_Call { + return s.ionosClient.EXPECT().UpdateNLBForwardingRule(mock.MatchedBy(nonNilCtx), datacenterID, nlbID, ruleID, mock.MatchedBy(func(rule sdk.NetworkLoadBalancerForwardingRule) bool { + targets := *rule.GetProperties().GetTargets() + return len(targets) == targetLen + })) +} + +func (s *nlbSuite) mockGetNLBCreationRequestCall() *clienttest.MockClient_GetRequests_Call { + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodPost, s.service.nlbsURL(s.infraLoadBalancer.Spec.NLB.DatacenterID)) +} + +func (s *nlbSuite) mockGetNLBDeletionRequestCall(nlbID string) *clienttest.MockClient_GetRequests_Call { + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodDelete, s.service.nlbURL(s.infraLoadBalancer.Spec.NLB.DatacenterID, nlbID)) +} + +func (s *nlbSuite) mockGetLANCall(lanID string) *clienttest.MockClient_GetLAN_Call { + return s.ionosClient.EXPECT().GetLAN(mock.MatchedBy(nonNilCtx), s.infraLoadBalancer.Spec.NLB.DatacenterID, lanID) +} + +func (s *nlbSuite) mockCreateNLBLANCall(properties sdk.LanProperties) *clienttest.MockClient_CreateLAN_Call { + return s.ionosClient.EXPECT().CreateLAN(mock.MatchedBy(nonNilCtx), s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID, properties) +} + +func (s *nlbSuite) mockGetNICCreationRequestCall(serverID string) *clienttest.MockClient_GetRequests_Call { + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodPost, s.service.nicsURL(s.infraLoadBalancer.Spec.NLB.DatacenterID, serverID)) +} + +func (s *nlbSuite) mockCreateNICCall(serverID string) *clienttest.MockClient_CreateNIC_Call { + return s.ionosClient.EXPECT().CreateNIC(mock.MatchedBy(nonNilCtx), s.loadBalancerScope.LoadBalancer.Spec.NLB.DatacenterID, serverID, mock.Anything) +} + +func (s *nlbSuite) mockGetLANDeletionRequestCall(lanID string) 
*clienttest.MockClient_GetRequests_Call { + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodDelete, s.service.lanURL(s.infraLoadBalancer.Spec.NLB.DatacenterID, lanID)) +} + +func (s *nlbSuite) defaultNLB() *sdk.NetworkLoadBalancer { + return &sdk.NetworkLoadBalancer{ + Id: ptr.To(exampleNLBID), + Metadata: &sdk.DatacenterElementMetadata{ + State: ptr.To(sdk.Available), + }, + Properties: &sdk.NetworkLoadBalancerProperties{ + Name: ptr.To(s.service.nlbName(s.infraLoadBalancer)), + }, + Entities: &sdk.NetworkLoadBalancerEntities{}, + } +} + +func (s *nlbSuite) exampleNICPPostRequest(serverID string) sdk.Request { + return s.exampleRequest(requestBuildOptions{ + status: sdk.RequestStatusQueued, + method: http.MethodPost, + url: s.service.nicsURL(exampleDatacenterID, serverID), + body: fmt.Sprintf(`{"properties": {"name": "%s"}}`, s.service.nlbNICName(s.infraLoadBalancer)), + href: exampleRequestPath, + targetID: exampleNICID, + targetType: sdk.NIC, + }) +} + +func (s *nlbSuite) exampleNLBPostRequest() sdk.Request { + return s.exampleRequest(requestBuildOptions{ + status: sdk.RequestStatusQueued, + method: http.MethodPost, + url: s.service.nlbsURL(s.infraLoadBalancer.Spec.NLB.DatacenterID), + body: fmt.Sprintf(`{"properties": {"name": "%s"}}`, s.service.nlbName(s.infraLoadBalancer)), + href: exampleRequestPath, + targetID: exampleNLBID, + targetType: sdk.NETWORKLOADBALANCER, + }) +} + +func (s *nlbSuite) exampleNLBDeleteRequest(nlbID string) sdk.Request { + return s.exampleRequest(requestBuildOptions{ + status: sdk.RequestStatusQueued, + method: http.MethodDelete, + url: s.service.nlbURL(s.infraLoadBalancer.Spec.NLB.DatacenterID, nlbID), + body: fmt.Sprintf(`{"properties": {"name": "%s"}}`, s.service.nlbName(s.infraLoadBalancer)), + href: exampleRequestPath, + targetID: exampleNLBID, + targetType: sdk.NETWORKLOADBALANCER, + }) +} + +func (s *nlbSuite) createControlPlaneMachines(start, count int) []infrav1.IonosCloudMachine { + cpMachines := make([]infrav1.IonosCloudMachine, count) + index := 0 + for nameIndex := start; nameIndex < (start + count); nameIndex++ { + cpMachines[index] = s.createMachineWithLabels( + fmt.Sprintf("cp-test-%d", nameIndex), + uuid.New().String(), + map[string]string{ + clusterv1.ClusterNameLabel: s.clusterScope.Cluster.Name, + clusterv1.MachineControlPlaneLabel: "", + }, + ) + index++ + } + return cpMachines +} + +func (*nlbSuite) createMachineWithLabels(name, id string, labels map[string]string) infrav1.IonosCloudMachine { + return infrav1.IonosCloudMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: metav1.NamespaceDefault, + Labels: labels, + CreationTimestamp: metav1.Now(), + }, + Spec: infrav1.IonosCloudMachineSpec{ + ProviderID: ptr.To(ionosProviderIDPrefix + id), + DatacenterID: exampleDatacenterID, + }, + } +} + +func (*nlbSuite) machinesToServers( + machines []infrav1.IonosCloudMachine, + applyFuncs ...func(*sdk.Server), +) []sdk.Server { + servers := make([]sdk.Server, len(machines)) + for index, m := range machines { + servers[index] = sdk.Server{ + Id: ptr.To(m.ExtractServerID()), + Metadata: &sdk.DatacenterElementMetadata{ + State: ptr.To(sdk.Available), + }, + Properties: &sdk.ServerProperties{ + Name: ptr.To(m.Name), + }, + } + + for _, apply := range applyFuncs { + apply(&servers[index]) + } + } + + return servers +} + +func (s *nlbSuite) applyDefaultNIC(srv *sdk.Server) { + srv.SetEntities(sdk.ServerEntities{ + Nics: &sdk.Nics{ + Items: &[]sdk.Nic{{ + Properties: &sdk.NicProperties{ + Name: 
ptr.To(s.service.nlbNICName(s.infraLoadBalancer)), + Ips: ptr.To([]string{"203.0.113.11"}), + Lan: ptr.To(int32(2)), + }, + }}, + }, + }) +} diff --git a/internal/service/cloud/network.go b/internal/service/cloud/network.go index 0c39ab40..3f7ae9a9 100644 --- a/internal/service/cloud/network.go +++ b/internal/service/cloud/network.go @@ -647,5 +647,7 @@ func (s *Service) patchLAN(ctx context.Context, ms *scope.Machine, lanID string, } func failoverRequired(ms *scope.Machine) bool { - return util.IsControlPlaneMachine(ms.Machine) || ms.IonosMachine.Spec.FailoverIP != nil + return (util.IsControlPlaneMachine(ms.Machine) && + ms.ClusterScope.IonosCluster.Spec.LoadBalancerProviderRef == nil) || + ms.IonosMachine.Spec.FailoverIP != nil } diff --git a/internal/service/cloud/network_test.go b/internal/service/cloud/network_test.go index d0eb3e5a..f7c4e3dc 100644 --- a/internal/service/cloud/network_test.go +++ b/internal/service/cloud/network_test.go @@ -18,7 +18,6 @@ package cloud import ( "context" - "fmt" "net/http" "testing" "time" @@ -85,7 +84,7 @@ func (s *lanSuite) TestNetworkDeleteLANSuccessful() { func (s *lanSuite) TestNetworkGetLANSuccessful() { lan := s.exampleLAN() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() foundLAN, err := s.service.getLAN(s.ctx, s.machineScope) s.NoError(err) s.NotNil(foundLAN) @@ -93,14 +92,14 @@ func (s *lanSuite) TestNetworkGetLANSuccessful() { } func (s *lanSuite) TestNetworkGetLANNotFound() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() lan, err := s.service.getLAN(s.ctx, s.machineScope) s.NoError(err) s.Nil(lan) } func (s *lanSuite) TestNetworkGetLANErrorNotUnique() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN(), s.exampleLAN()}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN(), s.exampleLAN()}}, nil).Once() lan, err := s.service.getLAN(s.ctx, s.machineScope) s.Error(err) s.Nil(lan) @@ -119,8 +118,8 @@ func (s *lanSuite) TestNetworkRemoveLANPendingRequestFromClusterNoRequest() { } func (s *lanSuite) TestNetworkReconcileLANNoExistingLANNoRequestCreate() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() - s.mockGetLANCreationRequestsCall().Return([]sdk.Request{}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() + s.mockGetLANCreationRequestsCall(s.machineScope.DatacenterID()).Return([]sdk.Request{}, nil).Once() s.mockCreateLANCall().Return(exampleRequestPath, nil).Once() requeue, err := s.service.ReconcileLAN(s.ctx, s.machineScope) s.NoError(err) @@ -128,15 +127,15 @@ func (s *lanSuite) TestNetworkReconcileLANNoExistingLANNoRequestCreate() { } func (s *lanSuite) TestNetworkReconcileLANNoExistingLANExistingRequestPending() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() - s.mockGetLANCreationRequestsCall().Return(s.examplePostRequest(sdk.RequestStatusQueued), nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() + s.mockGetLANCreationRequestsCall(s.machineScope.DatacenterID()).Return(s.exampleLANPostRequest(exampleLANID, sdk.RequestStatusQueued), nil).Once() requeue, err := s.service.ReconcileLAN(s.ctx, s.machineScope) 
s.NoError(err) s.True(requeue) } func (s *lanSuite) TestNetworkReconcileLANExistingLAN() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() requeue, err := s.service.ReconcileLAN(s.ctx, s.machineScope) s.NoError(err) s.False(requeue) @@ -145,14 +144,14 @@ func (s *lanSuite) TestNetworkReconcileLANExistingLAN() { func (s *lanSuite) TestNetworkReconcileLANExistingLANUnavailable() { lan := s.exampleLAN() lan.Metadata.State = ptr.To("BUSY") - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() requeue, err := s.service.ReconcileLAN(s.ctx, s.machineScope) s.NoError(err) s.True(requeue) } func (s *lanSuite) TestNetworkReconcileLANDeleteLANExistsNoPendingRequestsNoOtherUsersDelete() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() s.mockGetLANDeletionRequestsCall().Return([]sdk.Request{}, nil).Once() s.mockDeleteLANCall(exampleLANID).Return(exampleRequestPath, nil).Once() requeue, err := s.service.ReconcileLANDeletion(s.ctx, s.machineScope) @@ -163,7 +162,7 @@ func (s *lanSuite) TestNetworkReconcileLANDeleteLANExistsNoPendingRequestsNoOthe func (s *lanSuite) TestNetworkReconcileLANDeleteLANExistsNoPendingRequestsHasOtherUsersNoDelete() { lan := s.exampleLAN() lan.Entities.Nics.Items = &[]sdk.Nic{{Id: ptr.To("1")}} - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{lan}}, nil).Once() s.mockGetLANDeletionRequestsCall().Return([]sdk.Request{}, nil).Once() requeue, err := s.service.ReconcileLANDeletion(s.ctx, s.machineScope) s.NoError(err) @@ -173,16 +172,16 @@ func (s *lanSuite) TestNetworkReconcileLANDeleteLANExistsNoPendingRequestsHasOth } func (s *lanSuite) TestNetworkReconcileLANDeleteNoExistingLANExistingRequestPending() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() - s.mockGetLANCreationRequestsCall().Return(s.examplePostRequest(sdk.RequestStatusQueued), nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() + s.mockGetLANCreationRequestsCall(s.machineScope.DatacenterID()).Return(s.exampleLANPostRequest(exampleLANID, sdk.RequestStatusQueued), nil).Once() requeue, err := s.service.ReconcileLANDeletion(s.ctx, s.machineScope) s.NoError(err) s.True(requeue) } func (s *lanSuite) TestNetworkReconcileLANDeleteLANExistsExistingRequestPending() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() - requests := s.exampleDeleteRequest(sdk.RequestStatusQueued) + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() + requests := s.exampleLANDeleteRequest(exampleLANID, sdk.RequestStatusQueued) s.mockGetLANDeletionRequestsCall().Return(requests, nil).Once() requeue, err := s.service.ReconcileLANDeletion(s.ctx, s.machineScope) s.NoError(err) @@ -190,8 +189,8 @@ func (s *lanSuite) TestNetworkReconcileLANDeleteLANExistsExistingRequestPending( } func (s *lanSuite) TestNetworkReconcileLANDeleteLANDoesNotExist() { - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{}}, 
nil).Once() - s.mockGetLANCreationRequestsCall().Return(nil, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{}}, nil).Once() + s.mockGetLANCreationRequestsCall(s.machineScope.DatacenterID()).Return(nil, nil).Once() requeue, err := s.service.ReconcileLANDeletion(s.ctx, s.machineScope) s.NoError(err) s.False(requeue) @@ -205,8 +204,8 @@ func (s *lanSuite) TestReconcileIPFailoverNICNotInFailoverGroup() { testServer := s.defaultServer(s.infraMachine, exampleDHCPIP, exampleEndpointIP) - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{s.exampleLAN()}}, nil).Once() s.mockGetLANPatchRequestCall().Return([]sdk.Request{s.examplePatchRequest(sdk.RequestStatusDone)}, nil).Once() props := sdk.LanProperties{ @@ -232,8 +231,8 @@ func (s *lanSuite) TestReconcileIPFailoverNICAlreadyInFailoverGroup() { NicUuid: ptr.To(exampleNICID), }} - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() s.mockGetLANPatchRequestCall().Return([]sdk.Request{s.examplePatchRequest(sdk.RequestStatusDone)}, nil).Once() requeue, err := s.service.ReconcileIPFailover(s.ctx, s.machineScope) @@ -265,8 +264,8 @@ func (s *lanSuite) TestReconcileIPFailoverForWorkerWithAUTOSettings() { nil).Once() s.mockListIPBlocksCall().Return(&sdk.IpBlocks{Items: &[]sdk.IpBlock{ipBlock}}, nil).Once() - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() patchRequest := s.examplePatchRequest(sdk.RequestStatusDone) s.mockGetLANPatchRequestCall().Return([]sdk.Request{patchRequest}, nil).Once() @@ -314,8 +313,8 @@ func (s *lanSuite) TestReconcileIPFailoverNICHasWrongIPInFailoverGroup() { NicUuid: ptr.To(exampleNICID), }} - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() s.setupSuccessfulLANPatchMocks() // expect a patch request to update the IP @@ -356,8 +355,8 @@ func (s *lanSuite) TestReconcileIPFailoverAnotherNICInFailoverGroup() { NicUuid: ptr.To("f3b3f8e4-1f3d-11ec-82a8-0242ac130003"), }} - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() 
s.mockGetLANPatchRequestCall().Return([]sdk.Request{s.examplePatchRequest(sdk.RequestStatusDone)}, nil).Once() requeue, err := s.service.ReconcileIPFailover(s.ctx, s.machineScope) @@ -374,8 +373,8 @@ func setControlPlaneLabel(ctx context.Context, k8sClient client.Client, machine } func (s *lanSuite) reconcileIPFailoverDeletion(testServer *sdk.Server, testLAN sdk.Lan) (requeue bool, err error) { - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() s.setupSuccessfulLANPatchMocks() props := sdk.LanProperties{ @@ -444,7 +443,7 @@ func (s *lanSuite) TestReconcileIPFailoverDeletionControlPlaneSwitchNIC() { newIonosMachine.SetName("test-machine-2") newIonosMachine.SetResourceVersion("") newIonosMachine.SetCreationTimestamp(metav1.NewTime(time.Now())) - newIonosMachine.Spec.ProviderID = ptr.To("ionos://" + exampleSecondaryServerID) //nolint: goconst + newIonosMachine.Spec.ProviderID = ptr.To("ionos://" + exampleSecondaryServerID) err = s.k8sClient.Create(s.ctx, newIonosMachine) s.NoError(err) @@ -462,9 +461,9 @@ func (s *lanSuite) TestReconcileIPFailoverDeletionControlPlaneSwitchNIC() { testSecondaryServer.Id = ptr.To(exampleSecondaryServerID) (*testSecondaryServer.Entities.Nics.Items)[0].Id = ptr.To(exampleSecondaryNICID) - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockGetServerCall(exampleSecondaryServerID).Return(testSecondaryServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleSecondaryServerID).Return(testSecondaryServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() s.setupSuccessfulLANPatchMocks() @@ -510,8 +509,8 @@ func (s *lanSuite) TestReconcileFailoverDeletionWorkerNoSwap() { NicUuid: ptr.To(exampleSecondaryNICID), }} - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{testLAN}}, nil).Once() s.mockGetLANPatchRequestCall().Return(nil, nil).Once() requeue, err := s.service.ReconcileIPFailoverDeletion(s.ctx, s.machineScope) @@ -540,7 +539,7 @@ func (s *lanSuite) TestReconcileIPFailoverDeletionServerNotFound() { err := setControlPlaneLabel(s.ctx, s.k8sClient, s.machineScope.IonosMachine) s.NoError(err) - s.mockGetServerCall(exampleServerID). + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID). Return(nil, sdk.NewGenericOpenAPIError("server not found", nil, nil, 404)). 
Once() s.mockListServerCall().Return(&sdk.Servers{Items: &[]sdk.Server{}}, nil).Once() @@ -562,7 +561,7 @@ func (s *lanSuite) TestReconcileIPFailoverDeletionPrimaryNICNotFound() { Id: ptr.To(exampleServerID), } - s.mockGetServerCall(exampleServerID).Return(testServer, nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(testServer, nil).Once() requeue, err := s.service.ReconcileIPFailoverDeletion(s.ctx, s.machineScope) @@ -591,31 +590,6 @@ func (s *lanSuite) exampleIPBlock() sdk.IpBlock { } } -func (s *lanSuite) examplePostRequest(status string) []sdk.Request { - opts := requestBuildOptions{ - status: status, - method: http.MethodPost, - url: s.service.lansURL(s.machineScope.DatacenterID()), - body: fmt.Sprintf(`{"properties": {"name": "%s"}}`, s.service.lanName(s.clusterScope.Cluster)), - href: exampleRequestPath, - targetID: exampleLANID, - targetType: sdk.LAN, - } - return []sdk.Request{s.exampleRequest(opts)} -} - -func (s *lanSuite) exampleDeleteRequest(status string) []sdk.Request { - opts := requestBuildOptions{ - status: status, - method: http.MethodDelete, - url: s.service.lanURL(s.machineScope.DatacenterID(), exampleLANID), - href: exampleRequestPath, - targetID: exampleLANID, - targetType: sdk.LAN, - } - return []sdk.Request{s.exampleRequest(opts)} -} - func (s *lanSuite) examplePatchRequest(status string) sdk.Request { opts := requestBuildOptions{ status: status, @@ -636,18 +610,10 @@ func (s *lanSuite) mockCreateLANCall() *clienttest.MockClient_CreateLAN_Call { }) } -func (s *lanSuite) mockDeleteLANCall(id string) *clienttest.MockClient_DeleteLAN_Call { - return s.ionosClient.EXPECT().DeleteLAN(s.ctx, s.machineScope.DatacenterID(), id) -} - func (s *lanSuite) mockPatchLANCall(props sdk.LanProperties) *clienttest.MockClient_PatchLAN_Call { return s.ionosClient.EXPECT().PatchLAN(s.ctx, s.machineScope.DatacenterID(), exampleLANID, props) } -func (s *lanSuite) mockGetLANCreationRequestsCall() *clienttest.MockClient_GetRequests_Call { - return s.ionosClient.EXPECT().GetRequests(s.ctx, http.MethodPost, s.service.lansURL(s.machineScope.DatacenterID())) -} - func (s *lanSuite) mockGetLANPatchRequestCall() *clienttest.MockClient_GetRequests_Call { return s.ionosClient.EXPECT(). 
GetRequests(s.ctx, http.MethodPatch, s.service.lanURL(s.machineScope.DatacenterID(), exampleLANID)) diff --git a/internal/service/cloud/nic.go b/internal/service/cloud/nic.go index 2b9aa9da..bc2f2fa2 100644 --- a/internal/service/cloud/nic.go +++ b/internal/service/cloud/nic.go @@ -30,6 +30,10 @@ import ( "github.com/ionos-cloud/cluster-api-provider-ionoscloud/scope" ) +func (*Service) nicsURL(datacenterID, serverID string) string { + return path.Join("datacenters", datacenterID, "servers", serverID, "nics") +} + func (*Service) nicURL(ms *scope.Machine, serverID, nicID string) string { return path.Join("datacenters", ms.DatacenterID(), "servers", serverID, "nics", nicID) } @@ -131,6 +135,19 @@ func (s *Service) patchNIC( return s.ionosClient.WaitForRequest(ctx, location) } +func (s *Service) createAndAttachNIC( + ctx context.Context, + datacenterID, serverID string, + properties sdk.NicProperties, +) error { + location, err := s.ionosClient.CreateNIC(ctx, datacenterID, serverID, properties) + if err != nil { + return err + } + + return s.ionosClient.WaitForRequest(ctx, location) +} + func (s *Service) getLatestNICPatchRequest( ctx context.Context, ms *scope.Machine, serverID string, nicID string, ) (*requestInfo, error) { @@ -142,6 +159,16 @@ func (s *Service) getLatestNICPatchRequest( ) } +func (s *Service) getLatestNICCreateRequest( + ctx context.Context, datacenterID, serverID, nicName string, +) (*requestInfo, error) { + return getMatchingRequest[sdk.Nic]( + ctx, s, + http.MethodPost, + s.nicsURL(datacenterID, serverID), + matchByName[*sdk.Nic, *sdk.NicProperties](nicName)) +} + // nicHasIP returns true if the NIC contains the given IP address. func nicHasIP(nic *sdk.Nic, expectedIP string) bool { ips := ptr.Deref(nic.GetProperties().GetIps(), []string{}) diff --git a/internal/service/cloud/nic_test.go b/internal/service/cloud/nic_test.go index b6ac51db..fc678f20 100644 --- a/internal/service/cloud/nic_test.go +++ b/internal/service/cloud/nic_test.go @@ -48,7 +48,7 @@ func (s *nicSuite) TestNICName() { } func (s *nicSuite) TestReconcileNICConfig() { - s.mockGetServerCall(exampleServerID).Return(s.defaultServer(s.infraMachine, exampleDHCPIP), nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(s.defaultServer(s.infraMachine, exampleDHCPIP), nil).Once() // no patch request s.mockGetLatestNICPatchRequest(exampleServerID, exampleNICID).Return([]sdk.Request{}, nil).Once() @@ -67,7 +67,7 @@ func (s *nicSuite) TestReconcileNICConfig() { } func (s *nicSuite) TestReconcileNICConfigIPIsSet() { - s.mockGetServerCall(exampleServerID). + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID). 
Return(s.defaultServer(s.infraMachine, exampleDHCPIP, exampleEndpointIP), nil).Once() nic, err := s.service.reconcileNICConfig(s.ctx, s.machineScope, exampleEndpointIP) @@ -77,7 +77,7 @@ func (s *nicSuite) TestReconcileNICConfigIPIsSet() { } func (s *nicSuite) TestReconcileNICConfigPatchRequestPending() { - s.mockGetServerCall(exampleServerID).Return(s.defaultServer(s.infraMachine, exampleDHCPIP), nil).Once() + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(s.defaultServer(s.infraMachine, exampleDHCPIP), nil).Once() patchRequest := s.examplePatchRequest(sdk.RequestStatusQueued, exampleServerID, exampleNICID) diff --git a/internal/service/cloud/request.go b/internal/service/cloud/request.go index 975e1b8f..66a6d61e 100644 --- a/internal/service/cloud/request.go +++ b/internal/service/cloud/request.go @@ -64,6 +64,8 @@ func mapResourceType(cloudResource any) sdk.Type { return sdk.SERVER case sdk.IpBlock, *sdk.IpBlock: return sdk.IPBLOCK + case sdk.NetworkLoadBalancer, *sdk.NetworkLoadBalancer: + return sdk.NETWORKLOADBALANCER default: return "" } diff --git a/internal/service/cloud/server_test.go b/internal/service/cloud/server_test.go index 7799e4c7..ee60e4a6 100644 --- a/internal/service/cloud/server_test.go +++ b/internal/service/cloud/server_test.go @@ -161,7 +161,7 @@ func (s *serverSuite) TestReconcileServerAdditionalNetworks() { s.prepareReconcileServerRequestTest() s.mockGetServerCreationRequestCall().Return([]sdk.Request{}, nil) - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{{ + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{{ Id: ptr.To(exampleLANID), Properties: &sdk.LanProperties{ Name: ptr.To(s.service.lanName(s.clusterScope.Cluster)), @@ -193,7 +193,7 @@ func (s *serverSuite) TestReconcileEnterpriseServerNoRequest() { s.prepareReconcileServerRequestTest() s.mockGetServerCreationRequestCall().Return([]sdk.Request{}, nil) s.mockCreateServerCall(s.defaultServerComponents()).Return(&sdk.Server{Id: ptr.To("12345")}, "location/to/server", nil) - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{{ + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{{ Id: ptr.To(exampleLANID), Properties: &sdk.LanProperties{ Name: ptr.To(s.service.lanName(s.clusterScope.Cluster)), @@ -214,7 +214,7 @@ func (s *serverSuite) TestReconcileVCPUServerNoRequest() { props, entities := s.defaultServerComponents() props.Type = ptr.To(infrav1.ServerTypeVCPU.String()) s.mockCreateServerCall(props, entities).Return(&sdk.Server{Id: ptr.To("12345")}, "location/to/server", nil) - s.mockListLANsCall().Return(&sdk.Lans{Items: &[]sdk.Lan{{ + s.mockListLANsCall(s.machineScope.DatacenterID()).Return(&sdk.Lans{Items: &[]sdk.Lan{{ Id: ptr.To(exampleLANID), Properties: &sdk.LanProperties{ Name: ptr.To(s.service.lanName(s.clusterScope.Cluster)), @@ -249,7 +249,7 @@ func (s *serverSuite) prepareReconcileServerRequestTest() { } func (s *serverSuite) TestReconcileServerDeletion() { - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), }, nil) @@ -263,7 +263,7 @@ func (s *serverSuite) TestReconcileServerDeletion() { } func (s *serverSuite) TestReconcileServerDeletionDeleteBootVolume() { - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), Properties: 
&sdk.ServerProperties{ BootVolume: &sdk.ResourceReference{ @@ -272,7 +272,7 @@ func (s *serverSuite) TestReconcileServerDeletionDeleteBootVolume() { }, }, nil).Once() - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), }, nil).Once() @@ -293,7 +293,7 @@ func (s *serverSuite) TestReconcileServerDeletionDeleteBootVolume() { func (s *serverSuite) TestReconcileServerDeletionDeleteAllVolumes() { s.clusterScope.Cluster.DeletionTimestamp = ptr.To(metav1.Now()) - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), Properties: &sdk.ServerProperties{ BootVolume: &sdk.ResourceReference{ @@ -321,7 +321,7 @@ func (s *serverSuite) validateSuccessfulDeletionResponse(success bool, err error } func (s *serverSuite) TestReconcileServerDeletionServerNotFound() { - s.mockGetServerCall(exampleServerID).Return(nil, sdk.NewGenericOpenAPIError("not found", nil, nil, 404)) + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(nil, sdk.NewGenericOpenAPIError("not found", nil, nil, 404)) s.mockGetServerCreationRequestCall().Return([]sdk.Request{s.examplePostRequest(sdk.RequestStatusDone)}, nil) s.mockListServersCall().Return(&sdk.Servers{}, nil) @@ -332,7 +332,7 @@ func (s *serverSuite) TestReconcileServerDeletionServerNotFound() { func (s *serverSuite) TestReconcileServerDeletionUnexpectedError() { internalError := sdk.NewGenericOpenAPIError("unexpected error returned", nil, nil, 500) - s.mockGetServerCall(exampleServerID).Return( + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return( nil, internalError, ) @@ -342,7 +342,7 @@ func (s *serverSuite) TestReconcileServerDeletionUnexpectedError() { } func (s *serverSuite) TestReconcileServerDeletionCreateRequestPending() { - s.mockGetServerCall(exampleServerID).Return(nil, nil) + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(nil, nil) s.mockListServersCall().Return(&sdk.Servers{}, nil) exampleRequest := s.examplePostRequest(sdk.RequestStatusQueued) s.mockGetServerCreationRequestCall().Return([]sdk.Request{exampleRequest}, nil) @@ -357,7 +357,7 @@ func (s *serverSuite) TestReconcileServerDeletionCreateRequestPending() { } func (s *serverSuite) TestReconcileServerDeletionRequestPending() { - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), }, nil) @@ -375,7 +375,7 @@ func (s *serverSuite) TestReconcileServerDeletionRequestPending() { } func (s *serverSuite) TestReconcileServerDeletionRequestDone() { - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), }, nil) @@ -391,7 +391,7 @@ func (s *serverSuite) TestReconcileServerDeletionRequestDone() { } func (s *serverSuite) TestReconcileServerDeletionRequestFailed() { - s.mockGetServerCall(exampleServerID).Return(&sdk.Server{ + s.mockGetServerCall(s.machineScope.DatacenterID(), exampleServerID).Return(&sdk.Server{ Id: ptr.To(exampleServerID), }, nil) @@ -411,7 +411,7 @@ func (s *serverSuite) TestReconcileServerDeletionRequestFailed() { func (s *serverSuite) TestGetServerWithProviderID() { serverID := exampleServerID - 
s.mockGetServerCall(serverID).Return(&sdk.Server{}, nil) + s.mockGetServerCall(s.machineScope.DatacenterID(), serverID).Return(&sdk.Server{}, nil) server, err := s.service.getServer(s.ctx, s.machineScope) s.NoError(err) s.NotNil(server) @@ -419,7 +419,7 @@ func (s *serverSuite) TestGetServerWithProviderID() { func (s *serverSuite) TestGetServerWithProviderIDNotFound() { serverID := exampleServerID - s.mockGetServerCall(serverID).Return(nil, sdk.NewGenericOpenAPIError("not found", nil, nil, 404)) + s.mockGetServerCall(s.machineScope.DatacenterID(), serverID).Return(nil, sdk.NewGenericOpenAPIError("not found", nil, nil, 404)) s.mockListServersCall().Return(&sdk.Servers{Items: &[]sdk.Server{ { Properties: nil, diff --git a/internal/service/cloud/suite_test.go b/internal/service/cloud/suite_test.go index c6463d9b..6232e22a 100644 --- a/internal/service/cloud/suite_test.go +++ b/internal/service/cloud/suite_test.go @@ -26,6 +26,7 @@ import ( "github.com/go-logr/logr" sdk "github.com/ionos-cloud/sdk-go/v6" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -64,6 +65,8 @@ const ( exampleSecondaryDHCPIP = "192.0.2.3" ) +const ionosProviderIDPrefix = "ionos://" + const ( exampleLANID = "42" exampleNICID = "f3b3f8e4-3b6d-4b6d-8f1d-3e3e6e3e3e3e" @@ -75,22 +78,26 @@ const ( exampleRequestPath = "/test" exampleLocation = "de/txl" exampleDatacenterID = "ccf27092-34e8-499e-a2f5-2bdee9d34a12" + exampleNLBID = "f3b3f8e4-3b6d-4b6d-8f1d-3e3e6e3e3e3f" + exampleForwardingRuleID = "f3b3f8e4-3b6d-4b6d-8f1d-3e3e6e3e3e3e" ) type ServiceTestSuite struct { *require.Assertions suite.Suite - k8sClient client.Client - ctx context.Context - machineScope *scope.Machine - clusterScope *scope.Cluster - log logr.Logger - service *Service - capiCluster *clusterv1.Cluster - capiMachine *clusterv1.Machine - infraCluster *infrav1.IonosCloudCluster - infraMachine *infrav1.IonosCloudMachine - ionosClient *clienttest.MockClient + k8sClient client.Client + ctx context.Context + machineScope *scope.Machine + clusterScope *scope.Cluster + loadBalancerScope *scope.LoadBalancer + log logr.Logger + service *Service + capiCluster *clusterv1.Cluster + capiMachine *clusterv1.Machine + infraCluster *infrav1.IonosCloudCluster + infraMachine *infrav1.IonosCloudMachine + infraLoadBalancer *infrav1.IonosCloudLoadBalancer + ionosClient *clienttest.MockClient } func (s *ServiceTestSuite) SetupSuite() { @@ -139,7 +146,7 @@ func (s *ServiceTestSuite) SetupTest() { Spec: clusterv1.MachineSpec{ ClusterName: s.capiCluster.Name, Version: ptr.To("v1.26.12"), - ProviderID: ptr.To("ionos://" + exampleServerID), + ProviderID: ptr.To(ionosProviderIDPrefix + exampleServerID), }, } s.infraMachine = &infrav1.IonosCloudMachine{ @@ -152,7 +159,7 @@ func (s *ServiceTestSuite) SetupTest() { }, }, Spec: infrav1.IonosCloudMachineSpec{ - ProviderID: ptr.To("ionos://" + exampleServerID), + ProviderID: ptr.To(ionosProviderIDPrefix + exampleServerID), DatacenterID: "ccf27092-34e8-499e-a2f5-2bdee9d34a12", NumCores: 2, AvailabilityZone: infrav1.AvailabilityZoneAuto, @@ -172,6 +179,26 @@ func (s *ServiceTestSuite) SetupTest() { Status: infrav1.IonosCloudMachineStatus{}, } + s.infraLoadBalancer = &infrav1.IonosCloudLoadBalancer{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-loadbalancer", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: s.capiCluster.Name, + }, + }, + Spec: infrav1.IonosCloudLoadBalancerSpec{ + 
LoadBalancerSource: infrav1.LoadBalancerSource{ + NLB: &infrav1.NLBSpec{ + DatacenterID: exampleDatacenterID, + }, + }, + }, + Status: infrav1.IonosCloudLoadBalancerStatus{ + NLBStatus: &infrav1.NLBStatus{}, + }, + } + scheme := runtime.NewScheme() s.NoError(clusterv1.AddToScheme(scheme), "failed to extend scheme with Cluster API types") s.NoError(ipamv1.AddToScheme(scheme), "failed to extend scheme with Cluster API ipam types") @@ -200,6 +227,15 @@ func (s *ServiceTestSuite) SetupTest() { IonosMachine: s.infraMachine, Locker: locker.New(), }) + + s.NoError(err) + + s.loadBalancerScope, err = scope.NewLoadBalancer(scope.LoadBalancerParams{ + Client: s.k8sClient, + LoadBalancer: s.infraLoadBalancer, + ClusterScope: s.clusterScope, + Locker: locker.New(), + }) s.NoError(err, "failed to create machine scope") s.service, err = NewService(s.ionosClient, s.log) @@ -343,37 +379,78 @@ func (s *ServiceTestSuite) defaultServerComponents() (sdk.ServerProperties, sdk. } func (s *ServiceTestSuite) mockGetIPBlocksRequestsPostCall() *clienttest.MockClient_GetRequests_Call { - return s.ionosClient.EXPECT().GetRequests(s.ctx, http.MethodPost, ipBlocksPath) + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodPost, ipBlocksPath) } func (s *ServiceTestSuite) mockGetIPBlocksRequestsDeleteCall(id string) *clienttest.MockClient_GetRequests_Call { - return s.ionosClient.EXPECT().GetRequests(s.ctx, http.MethodDelete, path.Join(ipBlocksPath, id)) + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodDelete, path.Join(ipBlocksPath, id)) } func (s *ServiceTestSuite) mockListIPBlocksCall() *clienttest.MockClient_ListIPBlocks_Call { - return s.ionosClient.EXPECT().ListIPBlocks(s.ctx) + return s.ionosClient.EXPECT().ListIPBlocks(mock.MatchedBy(nonNilCtx)) } func (s *ServiceTestSuite) mockGetIPBlockByIDCall(ipBlockID string) *clienttest.MockClient_GetIPBlock_Call { - return s.ionosClient.EXPECT().GetIPBlock(s.ctx, ipBlockID) + return s.ionosClient.EXPECT().GetIPBlock(mock.MatchedBy(nonNilCtx), ipBlockID) } func (s *ServiceTestSuite) mockReserveIPBlockCall(name, location string) *clienttest.MockClient_ReserveIPBlock_Call { - return s.ionosClient.EXPECT().ReserveIPBlock(s.ctx, name, location, int32(1)) + return s.ionosClient.EXPECT().ReserveIPBlock(mock.MatchedBy(nonNilCtx), name, location, int32(1)) } func (s *ServiceTestSuite) mockWaitForRequestCall(location string) *clienttest.MockClient_WaitForRequest_Call { - return s.ionosClient.EXPECT().WaitForRequest(s.ctx, location) + return s.ionosClient.EXPECT().WaitForRequest(mock.MatchedBy(nonNilCtx), location) } -func (s *ServiceTestSuite) mockGetServerCall(serverID string) *clienttest.MockClient_GetServer_Call { - return s.ionosClient.EXPECT().GetServer(s.ctx, s.machineScope.DatacenterID(), serverID) +func (s *ServiceTestSuite) mockGetServerCall(datacenterID, serverID string) *clienttest.MockClient_GetServer_Call { + return s.ionosClient.EXPECT().GetServer(mock.MatchedBy(nonNilCtx), datacenterID, serverID) } -func (s *ServiceTestSuite) mockListLANsCall() *clienttest.MockClient_ListLANs_Call { - return s.ionosClient.EXPECT().ListLANs(s.ctx, s.machineScope.DatacenterID()) +func (s *ServiceTestSuite) mockListServersCall(datacenterID string) *clienttest.MockClient_ListServers_Call { + return s.ionosClient.EXPECT().ListServers(mock.MatchedBy(nonNilCtx), datacenterID) +} + +func (s *ServiceTestSuite) mockListLANsCall(datacenterID string) *clienttest.MockClient_ListLANs_Call { + return 
s.ionosClient.EXPECT().ListLANs(mock.MatchedBy(nonNilCtx), datacenterID) +} + +func (s *ServiceTestSuite) mockDeleteLANCall(id string) *clienttest.MockClient_DeleteLAN_Call { + return s.ionosClient.EXPECT().DeleteLAN(s.ctx, s.machineScope.DatacenterID(), id) } func (s *ServiceTestSuite) mockGetDatacenterLocationByIDCall(datacenterID string) *clienttest.MockClient_GetDatacenterLocationByID_Call { - return s.ionosClient.EXPECT().GetDatacenterLocationByID(s.ctx, datacenterID) + return s.ionosClient.EXPECT().GetDatacenterLocationByID(mock.MatchedBy(nonNilCtx), datacenterID) +} + +func (s *ServiceTestSuite) mockGetLANCreationRequestsCall(datacenterID string) *clienttest.MockClient_GetRequests_Call { + return s.ionosClient.EXPECT().GetRequests(mock.MatchedBy(nonNilCtx), http.MethodPost, s.service.lansURL(datacenterID)) +} + +func nonNilCtx(c context.Context) bool { + return c != nil +} + +func (s *ServiceTestSuite) exampleLANDeleteRequest(lanID, status string) []sdk.Request { + opts := requestBuildOptions{ + status: status, + method: http.MethodDelete, + url: s.service.lanURL(s.machineScope.DatacenterID(), lanID), + href: exampleRequestPath, + targetID: lanID, + targetType: sdk.LAN, + } + return []sdk.Request{s.exampleRequest(opts)} +} + +func (s *lanSuite) exampleLANPostRequest(lanID, status string) []sdk.Request { + opts := requestBuildOptions{ + status: status, + method: http.MethodPost, + url: s.service.lansURL(s.machineScope.DatacenterID()), + body: fmt.Sprintf(`{"properties": {"name": "%s"}}`, s.service.lanName(s.clusterScope.Cluster)), + href: exampleRequestPath, + targetID: lanID, + targetType: sdk.LAN, + } + return []sdk.Request{s.exampleRequest(opts)} } diff --git a/scope/loadbalancer.go b/scope/loadbalancer.go index 328e99b0..4f6ee93c 100644 --- a/scope/loadbalancer.go +++ b/scope/loadbalancer.go @@ -20,6 +20,8 @@ import ( "context" "errors" "fmt" + "net" + "net/netip" "time" "k8s.io/client-go/util/retry" @@ -46,6 +48,7 @@ type LoadBalancer struct { client client.Client patchHelper *patch.Helper Locker *locker.Locker + resolver resolver LoadBalancer *infrav1.IonosCloudLoadBalancer @@ -75,6 +78,7 @@ func NewLoadBalancer(params LoadBalancerParams) (*LoadBalancer, error) { return &LoadBalancer{ client: params.Client, patchHelper: helper, + resolver: net.DefaultResolver, Locker: params.Locker, LoadBalancer: params.LoadBalancer, ClusterScope: params.ClusterScope, @@ -86,6 +90,32 @@ func (l *LoadBalancer) Endpoint() clusterv1.APIEndpoint { return l.LoadBalancer.Spec.LoadBalancerEndpoint } +// ResolveEndpoint resolves the IP addresses for the given Endpoint of the Load Balancer. +func (l *LoadBalancer) ResolveEndpoint(ctx context.Context) ([]string, error) { + host := l.Endpoint().Host + + if host == "" { + return nil, nil + } + + if ip, err := netip.ParseAddr(host); err == nil { + return []string{ip.String()}, nil + } + + ips, err := l.resolver.LookupNetIP(ctx, "ip4", host) + if err != nil { + return nil, fmt.Errorf("failed to resolve host %q: %w", host, err) + } + + ret := make([]string, 0, len(ips)) + + for _, ip := range ips { + ret = append(ret, ip.String()) + } + + return ret, nil +} + // InfraClusterEndpoint returns the endpoint from the infra cluster.. 
func (l *LoadBalancer) InfraClusterEndpoint() clusterv1.APIEndpoint { return l.ClusterScope.IonosCluster.Spec.ControlPlaneEndpoint diff --git a/templates/cluster-template-nlb.yaml b/templates/cluster-template-nlb.yaml new file mode 100644 index 00000000..b41bd818 --- /dev/null +++ b/templates/cluster-template-nlb.yaml @@ -0,0 +1,336 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: v1 +kind: Secret +metadata: + name: "${CLUSTER_NAME}-credentials" +type: Opaque +stringData: + token: "${IONOS_TOKEN}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + location: ${CONTROL_PLANE_ENDPOINT_LOCATION} + loadBalancerProviderRef: + name: "${CLUSTER_NAME}-nlb" + credentialsRef: + name: "${CLUSTER_NAME}-credentials" +--- +kind: IonosCloudLoadBalancer +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-nlb" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + loadBalancerEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_HOST:-${CONTROL_PLANE_ENDPOINT_IP}} + port: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + nlb: + datacenterID: ${IONOSCLOUD_DATACENTER_ID} +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: IonosCloudMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: /etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. 
+ KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + + - content: | + #!/bin/bash + set -e + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + NODE_IPv4_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet") | select(.scope=="global") | select(.dynamic) | .local') + if [[ $NODE_IPv4_ADDRESS ]]; then + sed -i '$ s/$/ --node-ip '"$NODE_IPv4_ADDRESS"'/' /etc/default/kubelet + fi + # IPv6 currently not set, the ip is not set then this runs. Needs to be waited for. + NODE_IPv6_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet6") | select(.scope=="global") | .local') + if [[ $NODE_IPv6_ADDRESS ]]; then + sed -i '$ s/$/ --node-ip '"$NODE_IPv6_ADDRESS"'/' /etc/default/kubelet + fi + owner: root:root + path: /etc/set-node-ip.sh + permissions: '0700' + + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + # workaround 1.29 IP issue + - /etc/set-node-ip.sh + postKubeadmCommands: + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be installed for cluster-api + # to continue after the first node. 
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + initConfiguration: + localAPIEndpoint: + bindPort: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + joinConfiguration: + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + version: "${KUBERNETES_VERSION}" +--- +kind: IonosCloudMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-4} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-8192} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-2} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-4096} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: [${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: 
/etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. + KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + postKubeadmCommands: + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be + # installed for cluster-api to continue after the first node. 
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + criSocket: unix:///run/containerd/containerd.sock diff --git a/test/e2e/data/infrastructure-ionoscloud/cluster-template-nlb.yaml b/test/e2e/data/infrastructure-ionoscloud/cluster-template-nlb.yaml new file mode 100644 index 00000000..8a0f5c40 --- /dev/null +++ b/test/e2e/data/infrastructure-ionoscloud/cluster-template-nlb.yaml @@ -0,0 +1,354 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + cni: "${CLUSTER_NAME}-crs-0" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudCluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: KubeadmControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + name: "${CLUSTER_NAME}-control-plane" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudCluster +metadata: + name: "${CLUSTER_NAME}" +spec: + location: ${CONTROL_PLANE_ENDPOINT_LOCATION} + loadBalancerProviderRef: + name: "${CLUSTER_NAME}-nlb" + credentialsRef: + name: "ionoscloud-credentials" +--- +kind: IonosCloudLoadBalancer +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-nlb" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + loadBalancerEndpoint: + host: ${CONTROL_PLANE_ENDPOINT_HOST:-${CONTROL_PLANE_ENDPOINT_IP}} + port: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + nlb: + datacenterID: ${IONOSCLOUD_DATACENTER_ID} +--- +kind: KubeadmControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + machineTemplate: + infrastructureRef: + kind: IonosCloudMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + name: "${CLUSTER_NAME}-control-plane" + kubeadmConfigSpec: + users: + - name: root + sshAuthorizedKeys: [${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: 
/etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. + KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + + - content: | + #!/bin/bash + set -e + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + NODE_IPv4_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet") | select(.scope=="global") | select(.dynamic) | .local') + if [[ $NODE_IPv4_ADDRESS ]]; then + sed -i '$ s/$/ --node-ip '"$NODE_IPv4_ADDRESS"'/' /etc/default/kubelet + fi + # IPv6 currently not set, the ip is not set then this runs. Needs to be waited for. + NODE_IPv6_ADDRESS=$(ip -j addr show dev ens6 | jq -r '.[].addr_info[] | select(.family == "inet6") | select(.scope=="global") | .local') + if [[ $NODE_IPv6_ADDRESS ]]; then + sed -i '$ s/$/ --node-ip '"$NODE_IPv6_ADDRESS"'/' /etc/default/kubelet + fi + owner: root:root + path: /etc/set-node-ip.sh + permissions: '0700' + + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + # workaround 1.29 IP issue + - /etc/set-node-ip.sh + postKubeadmCommands: + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be installed for cluster-api + # to continue after the first node. 
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + initConfiguration: + localAPIEndpoint: + bindPort: ${CONTROL_PLANE_ENDPOINT_PORT:-6443} + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + joinConfiguration: + nodeRegistration: + criSocket: unix:///run/containerd/containerd.sock + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + version: "${KUBERNETES_VERSION}" +--- +kind: IonosCloudMachineTemplate +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + template: + spec: + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-4} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-8192} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "${CLUSTER_NAME}-workers" + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + selector: + matchLabels: + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + node-role.kubernetes.io/node: "" + spec: + clusterName: "${CLUSTER_NAME}" + version: "${KUBERNETES_VERSION}" + bootstrap: + configRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + infrastructureRef: + name: "${CLUSTER_NAME}-worker" + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: IonosCloudMachineTemplate +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: IonosCloudMachineTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + datacenterID: ${IONOSCLOUD_DATACENTER_ID} + numCores: ${IONOSCLOUD_MACHINE_NUM_CORES:-2} + memoryMB: ${IONOSCLOUD_MACHINE_MEMORY_MB:-4096} + disk: + image: + id: ${IONOSCLOUD_MACHINE_IMAGE_ID} +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "${CLUSTER_NAME}-worker" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: [${IONOSCLOUD_MACHINE_SSH_KEYS}] + ntp: + enabled: true + servers: + - 0.de.pool.ntp.org + - 1.de.pool.ntp.org + - 2.de.pool.ntp.org + - 3.de.pool.ntp.org + files: + - path: 
/etc/ssh/sshd_config.d/ssh-audit_hardening.conf + owner: root:root + permissions: '0644' + content: | + # Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com + # hardening guide. + KexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256 + Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr + MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com + HostKeyAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + CASignatureAlgorithms sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512,rsa-sha2-256 + GSSAPIKexAlgorithms gss-curve25519-sha256-,gss-group16-sha512- + HostbasedAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + PubkeyAcceptedAlgorithms sk-ssh-ed25519-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,ssh-ed25519,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-256-cert-v01@openssh.com,rsa-sha2-256 + - path: /etc/sysctl.d/k8s.conf + content: | + fs.inotify.max_user_watches = 65536 + net.netfilter.nf_conntrack_max = 1000000 + - path: /etc/modules-load.d/k8s.conf + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + ip_vs_sed + # Crictl config + - path: /etc/crictl.yaml + content: | + runtime-endpoint: unix:///run/containerd/containerd.sock + timeout: 10 + # CSI Metadata config + - content: | + { + "datacenter-id": "${IONOSCLOUD_DATACENTER_ID}" + } + owner: root:root + path: /etc/ie-csi/cfg.json + permissions: '0644' + preKubeadmCommands: + - systemctl restart systemd-networkd.service systemd-modules-load.service systemd-journald containerd + # disable swap + - swapoff -a + - sed -i '/ swap / s/^/#/' /etc/fstab + - sysctl --system + postKubeadmCommands: + - > + systemctl disable --now udisks2 multipathd motd-news.timer fwupd-refresh.timer + packagekit ModemManager snapd snapd.socket snapd.apparmor snapd.seeded + # INFO(schegi-ionos): We decided to not remove this for now, since removing this would require the ccm to be + # installed for cluster-api to continue after the first node. 
+ - export system_uuid=$(kubectl --kubeconfig /etc/kubernetes/kubelet.conf get node $(hostname) -ojsonpath='{..systemUUID }') + - > + kubectl --kubeconfig /etc/kubernetes/kubelet.conf + patch node $(hostname) + --type strategic -p '{"spec": {"providerID": "ionos://'$${system_uuid}'"}}' + - rm /etc/ssh/ssh_host_* + - ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N "" + - ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" + - sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config + - awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe + - mv /etc/ssh/moduli.safe /etc/ssh/moduli + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - iptables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --set + - ip6tables -I INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 10 --hitcount 10 -j DROP + - apt-get update + - DEBIAN_FRONTEND=noninteractive apt-get install -q -y netfilter-persistent iptables-persistent + - service netfilter-persistent save + - systemctl restart sshd + joinConfiguration: + nodeRegistration: + kubeletExtraArgs: + # use cloud-provider: external when using a CCM + cloud-provider: "" + criSocket: unix:///run/containerd/containerd.sock +--- +# ConfigMap object referenced by the ClusterResourceSet object and with +# the CNI resource defined in the test config file +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-0" +data: ${CNI_RESOURCES} +--- +# ClusterResourceSet object with +# a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: "${CLUSTER_NAME}-crs-0" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-0" + resources: + - name: "cni-${CLUSTER_NAME}-crs-0" + kind: ConfigMap
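
As a minimal sketch of the semantic change to failoverRequired in internal/service/cloud/network.go above: with an NLB-backed endpoint (a loadBalancerProviderRef on the IonosCloudCluster), control-plane machines no longer join the LAN IP failover group unless an explicit failover IP is requested. The hypothetical helper below restates that condition with plain booleans standing in for util.IsControlPlaneMachine and the presence of Spec.LoadBalancerProviderRef; the real function operates on a *scope.Machine and is not reproduced here.

package main

import "fmt"

// failoverNeeded is a simplified, hypothetical restatement of the updated
// failoverRequired condition: classic VIP failover is only configured for
// control-plane machines when the cluster does NOT delegate its endpoint to
// an IonosCloudLoadBalancer, or when a machine explicitly requests a
// failover IP.
func failoverNeeded(isControlPlane, hasLoadBalancerProviderRef bool, failoverIP *string) bool {
	return (isControlPlane && !hasLoadBalancerProviderRef) || failoverIP != nil
}

func main() {
	fmt.Println(failoverNeeded(true, true, nil))  // false: the NLB serves the control-plane endpoint
	fmt.Println(failoverNeeded(true, false, nil)) // true: fall back to the IP failover path
}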