diff --git a/apis/civo/cluster/v1alpha1/types.go b/apis/civo/cluster/v1alpha1/types.go index 0e7e427..29f6bd3 100644 --- a/apis/civo/cluster/v1alpha1/types.go +++ b/apis/civo/cluster/v1alpha1/types.go @@ -43,11 +43,16 @@ type CivoKubernetesConnectionDetails struct { type CivoKubernetesSpec struct { xpv1.ResourceSpec `json:",inline"` Name string `json:"name"` + Region string `json:"region,omitempty"` Pools []civogo.KubernetesClusterPoolConfig `json:"pools"` // +optional // A list of applications to install from civo marketplace. Applications []string `json:"applications,omitempty"` ConnectionDetails CivoKubernetesConnectionDetails `json:"connectionDetails"` + // +required + // +immutable + // NOTE: This can only be set at creation time. Changing this value after creation will not move the cluster into another network. + NetworkID *string `json:"networkId"` // +optional // +kubebuilder:validation:Enum=flannel;cilium // +kubebuilder:default=flannel @@ -59,7 +64,9 @@ type CivoKubernetesSpec struct { // If not set, the default kubernetes version(1.22.2-k31) will be used. // If set, the value must be a valid kubernetes version, you can use the following command to get the valid versions: `civo k3s versions` // Changing the version to a higher version will upgrade the cluster. Note that this may cause breaking changes to the Kubernetes API so please check kubernetes deprecations/mitigations before upgrading. - Version *string `json:"version,omitempty"` + Version *string `json:"version,omitempty"` + Tags []string `json:"tags,omitempty"` + FirewallID *string `json:"firewallId,omitempty"` } // A CivoKubernetesStatus represents the observed state of a CivoKubernetes. 
diff --git a/apis/civo/cluster/v1alpha1/zz_generated.deepcopy.go b/apis/civo/cluster/v1alpha1/zz_generated.deepcopy.go index 0919e90..4c32f09 100644 --- a/apis/civo/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/apis/civo/cluster/v1alpha1/zz_generated.deepcopy.go @@ -145,6 +145,11 @@ func (in *CivoKubernetesSpec) DeepCopyInto(out *CivoKubernetesSpec) { copy(*out, *in) } out.ConnectionDetails = in.ConnectionDetails + if in.NetworkID != nil { + in, out := &in.NetworkID, &out.NetworkID + *out = new(string) + **out = **in + } if in.CNIPlugin != nil { in, out := &in.CNIPlugin, &out.CNIPlugin *out = new(string) @@ -155,6 +160,16 @@ func (in *CivoKubernetesSpec) DeepCopyInto(out *CivoKubernetesSpec) { *out = new(string) **out = **in } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FirewallID != nil { + in, out := &in.FirewallID, &out.FirewallID + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CivoKubernetesSpec. 
diff --git a/build b/build index 832bd82..7da2fde 160000 --- a/build +++ b/build @@ -1 +1 @@ -Subproject commit 832bd82745c83908a6eb2086d6726a7366108979 +Subproject commit 7da2fdeb3dc1ebbce8210a58616debe34ef0fd97 diff --git a/examples/civo/cluster/cluster.yaml b/examples/civo/cluster/cluster.yaml index f4f7981..8cf1a5c 100644 --- a/examples/civo/cluster/cluster.yaml +++ b/examples/civo/cluster/cluster.yaml @@ -1,20 +1,25 @@ kind: CivoKubernetes apiVersion: cluster.civo.crossplane.io/v1alpha1 -metadata: +metadata: name: test-crossplane spec: name: test-crossplane + region: FRA1 + networkId: 2b192fd5-7d33-42a9-95a1-faf2d9712871 + firewallId: 2184b1cd-35bc-4dd7-9405-c7486c3c5141 + tags: + - "test-tag" pools: - - id: "8382e422-dcdd-461f-afb4-2ab67f171c3e" + - id: "pool-small" count: 2 size: g3.k3s.small - - id: "8482f422-dcdd-461g-afb4-2ab67f171c3e" - count: 1 + - id: "pool-small-2" + count: 2 size: g3.k3s.small - applications: + applications: - "argo-cd" - "prometheus-operator" - version: "1.22.2-k3s1" + version: "1.23.6-k3s1" connectionDetails: connectionSecretNamePrefix: "cluster-details" connectionSecretNamespace: "default" diff --git a/internal/controller/civokubernetes/civokubernetes.go b/internal/controller/civokubernetes/civokubernetes.go index 96e4dfe..2c69693 100644 --- a/internal/controller/civokubernetes/civokubernetes.go +++ b/internal/controller/civokubernetes/civokubernetes.go @@ -3,15 +3,16 @@ package civokubernetes import ( "context" "fmt" + "sort" "strings" "github.com/civo/civogo" "github.com/crossplane-contrib/provider-civo/apis/civo/cluster/v1alpha1" v1alpha1provider "github.com/crossplane-contrib/provider-civo/apis/civo/provider/v1alpha1" - "github.com/crossplane-contrib/provider-civo/pkg/civocli" xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" "github.com/crossplane/crossplane-runtime/pkg/event" "github.com/crossplane/crossplane-runtime/pkg/logging" + "github.com/crossplane/crossplane-runtime/pkg/meta" 
"github.com/crossplane/crossplane-runtime/pkg/ratelimiter" "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" "github.com/crossplane/crossplane-runtime/pkg/resource" @@ -38,7 +39,7 @@ type connecter struct { type external struct { kube client.Client - civoClient *civocli.CivoClient + civoClient *civogo.Client } // Setup sets up a Civo Kubernetes controller. @@ -84,7 +85,7 @@ func (c *connecter) Connect(ctx context.Context, mg resource.Managed) (managed.E return nil, errors.New("could not find secret") } - civoClient, err := civocli.NewCivoClient(string(s.Data["credentials"]), providerConfig.Spec.Region) + civoClient, err := civogo.NewClient(string(s.Data["credentials"]), providerConfig.Spec.Region) if err != nil { return nil, err @@ -95,15 +96,15 @@ func (c *connecter) Connect(ctx context.Context, mg resource.Managed) (managed.E }, nil } -//nolint +// nolint func (e *external) Observe(ctx context.Context, mg resource.Managed) (managed.ExternalObservation, error) { cr, ok := mg.(*v1alpha1.CivoKubernetes) if !ok { return managed.ExternalObservation{}, errors.New("invalid object") } - civoCluster, err := e.civoClient.GetK3sCluster(cr.Spec.Name) + civoCluster, err := e.civoClient.GetKubernetesCluster(meta.GetExternalName(cr.GetObjectMeta())) if err != nil { - return managed.ExternalObservation{ResourceExists: false}, err + return managed.ExternalObservation{}, nil } if civoCluster == nil { return managed.ExternalObservation{ResourceExists: false}, nil @@ -142,18 +143,23 @@ func (e *external) Observe(ctx context.Context, mg resource.Managed) (managed.Ex return managed.ExternalObservation{ResourceExists: true}, err } } - // -------------------------------------------- - _, err = e.Update(ctx, mg) - if err != nil { - log.Warnf("update error:%s ", err.Error()) - } - // -------------------------------------------- + // UPDATE CHECK -------------------------------------------- cr.SetConditions(xpv1.Available()) + upToDate, _ := e.ResourceIsUpToDate(ctx, cr, 
civoCluster) + + if upToDate { + cr.Status.Message = "Cluster is up to date" + } else { + cr.Status.Message = "Cluster is being updated" + } + return managed.ExternalObservation{ ResourceExists: true, - ResourceUpToDate: true, + ResourceUpToDate: upToDate, ConnectionDetails: cd, }, nil + // -------------------------------------------- + case "BUILDING": cr.Status.Message = "Cluster is being created" cr.SetConditions(xpv1.Creating()) @@ -170,19 +176,56 @@ func (e *external) Create(ctx context.Context, mg resource.Managed) (managed.Ext if !ok { return managed.ExternalCreation{}, errors.New("invalid object") } - civoCluster, err := e.civoClient.GetK3sCluster(cr.Spec.Name) + + // at the first call, this id will be the cluster name; civo should return 404 + existingClusterID := meta.GetExternalName(cr.GetObjectMeta()) + civoCluster, err := e.civoClient.GetKubernetesCluster(existingClusterID) if err != nil { - return managed.ExternalCreation{}, err + if civogo.DatabaseKubernetesClusterNotFoundError.Is(err) { + // 404 cluster not found, we continue with the create + } else { + // cluster lookup error, return + return managed.ExternalCreation{}, err + } } if civoCluster != nil { return managed.ExternalCreation{}, nil } + + clusterRegion := cr.Spec.Region + if clusterRegion == "" { + clusterRegion = e.civoClient.Region + } + // Create or Update - err = e.civoClient.CreateNewK3sCluster(cr.Spec.Name, cr.Spec.Pools, cr.Spec.Applications, cr.Spec.CNIPlugin, cr.Spec.Version) + kc := &civogo.KubernetesClusterConfig{ + Name: cr.Spec.Name, + Region: clusterRegion, + NetworkID: *cr.Spec.NetworkID, + Pools: cr.Spec.Pools, + Applications: strings.Join(cr.Spec.Applications, ","), + Tags: strings.Join(cr.Spec.Tags, " "), + } + + if cr.Spec.CNIPlugin != nil { + kc.CNIPlugin = *cr.Spec.CNIPlugin + } + if cr.Spec.Version != nil { + kc.KubernetesVersion = *cr.Spec.Version + } + + if cr.Spec.FirewallID != nil { + kc.InstanceFirewall = *cr.Spec.FirewallID + } + + newCluster, err := 
e.civoClient.NewKubernetesClusters(kc) if err != nil { + log.Warn("Cluster creation failed", err) return managed.ExternalCreation{}, err } + meta.SetExternalName(cr, newCluster.ID) + cr.SetConditions(xpv1.Creating()) return managed.ExternalCreation{ @@ -190,13 +233,45 @@ func (e *external) Create(ctx context.Context, mg resource.Managed) (managed.Ext }, nil } +func (e *external) ResourceIsUpToDate(ctx context.Context, mg resource.Managed, remote *civogo.KubernetesCluster) (bool, error) { + desired, ok := mg.(*v1alpha1.CivoKubernetes) + if !ok { + return false, errors.New("invalid object") + } + + if len(desired.Spec.Pools) != len(remote.Pools) || !arePoolsEqual(desired, remote) { + return false, nil + } + + if desired.Spec.Version != nil && *desired.Spec.Version > remote.Version { + return false, nil + } + + if stringSlicesNeedUpdate(desired.Spec.Tags, remote.Tags) { + return false, nil + } + + // nolint + var remoteAppNames []string + for _, app := range remote.InstalledApplications { + remoteAppNames = append(remoteAppNames, app.Name) + } + + if stringSlicesNeedUpdate(desired.Spec.Applications, remoteAppNames) { + return false, nil + } + + return true, nil +} + // nolint func (e *external) Update(ctx context.Context, mg resource.Managed) (managed.ExternalUpdate, error) { desiredCivoCluster, ok := mg.(*v1alpha1.CivoKubernetes) if !ok { return managed.ExternalUpdate{}, errors.New("invalid object") } - remoteCivoCluster, err := e.civoClient.GetK3sCluster(desiredCivoCluster.Spec.Name) + desiredClusterID := meta.GetExternalName(desiredCivoCluster.GetObjectMeta()) + remoteCivoCluster, err := e.civoClient.GetKubernetesCluster(desiredClusterID) if err != nil { return managed.ExternalUpdate{}, err } @@ -215,8 +290,11 @@ func (e *external) Update(ctx context.Context, mg resource.Managed) (managed.Ext if len(desiredCivoCluster.Spec.Pools) != len(remoteCivoCluster.Pools) || !arePoolsEqual(desiredCivoCluster, remoteCivoCluster) { log.Debug("Pools are not equal") - //TODO: Set 
region in the civo client once to avoid passing the providerConfig - if err := e.civoClient.UpdateK3sCluster(desiredCivoCluster, remoteCivoCluster, providerConfig); err != nil { + desiredClusterConfig := &civogo.KubernetesClusterConfig{ + Pools: desiredCivoCluster.Spec.Pools, + Region: desiredCivoCluster.Spec.Region, + } + if _, err := e.civoClient.UpdateKubernetesCluster(desiredClusterID, desiredClusterConfig); err != nil { + return managed.ExternalUpdate{}, err + } } @@ -224,21 +302,83 @@ func (e *external) Update(ctx context.Context, mg resource.Managed) (managed.Ext if desiredCivoCluster.Spec.Version != nil { if *desiredCivoCluster.Spec.Version > remoteCivoCluster.Version { log.Info("Updating cluster version") - if err := e.civoClient.UpdateK3sClusterVersion(desiredCivoCluster, remoteCivoCluster, providerConfig); err != nil { + desiredClusterConfig := &civogo.KubernetesClusterConfig{ + Name: desiredCivoCluster.Name, + KubernetesVersion: *desiredCivoCluster.Spec.Version, + Region: desiredCivoCluster.Spec.Region, + } + if _, err := e.civoClient.UpdateKubernetesCluster(desiredClusterID, desiredClusterConfig); err != nil { + return managed.ExternalUpdate{}, err + } } } + if desiredCivoCluster.Spec.FirewallID != nil && *desiredCivoCluster.Spec.FirewallID != remoteCivoCluster.FirewallID { + desiredClusterConfig := &civogo.KubernetesClusterConfig{ + Name: desiredCivoCluster.Name, + InstanceFirewall: *desiredCivoCluster.Spec.FirewallID, + Region: desiredCivoCluster.Spec.Region, + } + if _, err := e.civoClient.UpdateKubernetesCluster(desiredClusterID, desiredClusterConfig); err != nil { + return managed.ExternalUpdate{}, err + } + } + + if stringSlicesNeedUpdate(desiredCivoCluster.Spec.Tags, remoteCivoCluster.Tags) { + desiredClusterConfig := &civogo.KubernetesClusterConfig{ + Name: desiredCivoCluster.Name, + Tags: strings.Join(desiredCivoCluster.Spec.Tags, " "), + Region: desiredCivoCluster.Spec.Region, + } + if _, err := e.civoClient.UpdateKubernetesCluster(desiredClusterID, 
desiredClusterConfig); err != nil { + return managed.ExternalUpdate{}, err + } + } + + // nolint + var remoteAppNames []string + for _, app := range remoteCivoCluster.InstalledApplications { + remoteAppNames = append(remoteAppNames, app.Name) + } + + if stringSlicesNeedUpdate(desiredCivoCluster.Spec.Applications, remoteAppNames) { + desiredClusterConfig := &civogo.KubernetesClusterConfig{ + Name: desiredCivoCluster.Name, + Applications: strings.Join(desiredCivoCluster.Spec.Applications, " "), + Region: desiredCivoCluster.Spec.Region, + } + if _, err := e.civoClient.UpdateKubernetesCluster(desiredClusterID, desiredClusterConfig); err != nil { + return managed.ExternalUpdate{}, err + } + } + return managed.ExternalUpdate{}, nil } +func stringSlicesNeedUpdate(desired, remote []string) bool { + if len(desired) != len(remote) { + return true + } else if len(desired) == 0 { + return false + } + + sort.Strings(desired) + sort.Strings(remote) + + for i := range desired { + if desired[i] != remote[i] { + return true + } + } + return false +} + func (e *external) Delete(ctx context.Context, mg resource.Managed) error { cr, ok := mg.(*v1alpha1.CivoKubernetes) if !ok { return nil } - civoCluster, err := e.civoClient.GetK3sCluster(cr.Spec.Name) + civoCluster, err := e.civoClient.GetKubernetesCluster(meta.GetExternalName(cr.GetObjectMeta())) if err != nil { return err } @@ -262,7 +402,8 @@ func (e *external) Delete(ctx context.Context, mg resource.Managed) error { // ------------------------------------------------ cr.Status.Message = deletionMessage cr.SetConditions(xpv1.Deleting()) - return e.civoClient.DeleteK3sCluster(civoCluster.Name) + _, err = e.civoClient.DeleteKubernetesCluster(civoCluster.ID) + return err } func arePoolsEqual(desiredCivoCluster *v1alpha1.CivoKubernetes, remoteCivoCluster *civogo.KubernetesCluster) bool { diff --git a/internal/controller/civokubernetes/civokubernetes_test.go b/internal/controller/civokubernetes/civokubernetes_test.go new file mode 
100644 index 0000000..2c41656 --- /dev/null +++ b/internal/controller/civokubernetes/civokubernetes_test.go @@ -0,0 +1,340 @@ +package civokubernetes + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/civo/civogo" + "github.com/crossplane-contrib/provider-civo/apis/civo/cluster/v1alpha1" + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/meta" + cptest "github.com/crossplane/crossplane-runtime/pkg/test" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const testClusterExternalID = "69a23478-a89e-41d2-97b1-6f4c341cee70" + +func getFakeClusterv1Alpha1() *v1alpha1.CivoKubernetes { + networkID := "fake-network-id" + firewallID := "fake-firewall-id" + version := "1.22.2-k3s1" + cnipluginflannel := "flannel" + + return &v1alpha1.CivoKubernetes{ + TypeMeta: v1.TypeMeta{ + Kind: v1alpha1.CivoKubernetesKind, + APIVersion: v1alpha1.CivoKubernetesKindAPIVersion, + }, + ObjectMeta: v1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: v1alpha1.CivoKubernetesSpec{ + ResourceSpec: getFakeResourceSpec(), + Name: "test-cluster", + Region: "LON1", + NetworkID: &networkID, + FirewallID: &firewallID, + Version: &version, + CNIPlugin: &cnipluginflannel, // required + Pools: []civogo.KubernetesClusterPoolConfig{ + getFakePool(1), + }, + Applications: []string{"kubernetes-dashboard"}, + }, + } +} + +func getFakePool(id int) civogo.KubernetesClusterPoolConfig { + return civogo.KubernetesClusterPoolConfig{ + ID: fmt.Sprintf("test-pool-%d", id), + Count: 1, + Size: "g3.k3s.small", + } +} + +func getFakeResourceSpec() xpv1.ResourceSpec { + return xpv1.ResourceSpec{ + ProviderConfigReference: &xpv1.Reference{ + Name: "test-provider", + }, + WriteConnectionSecretToReference: &xpv1.SecretReference{ + Name: "connection-secret", + }, + DeletionPolicy: xpv1.DeletionOrphan, + } +} + +func TestUpdate(t *testing.T) { + testCasesConfig := 
[]ConfigErrorClientForTesting{ + { + Method: "GET", + Value: []ValueErrorClientForTesting{ + { + URL: fmt.Sprintf("/v2/kubernetes/clusters/%s", testClusterExternalID), + ResponseBody: fmt.Sprintf(`{ + "id": "%s", + "name": "test-cluster", + "version": "1.22.2-k3s1", + "cluster_type": "k3s", + "status": "ACTIVE", + "ready": true, + "num_target_nodes": 1, + "target_nodes_size": "g3.k3s.small", + "built_at": "0001-01-01T00:00:00Z", + "kubernetes_version": "1.23.6-k3s1", + "created_at": "2023-03-01T19:24:36Z", + "required_pools": [ + { + "id": "node-pool", + "size": "g3.k3s.small", + "count": 1 + } + ], + "firewall_id": "test-firewall-id", + "master_ipv6": "", + "applications": "kubernetes-dashboard", + "network_id": "test-network-id", + "pools": [{ + "id": "node-pool", + "size": "g3.k3s.small", + "count": 1 + }] + }`, testClusterExternalID), + }, + }, + }, + { + Method: "PUT", + Value: []ValueErrorClientForTesting{ + { + URL: "/v2/kubernetes/clusters/", + RequestBody: `{"region":"TEST","pools":[{"id":"test-pool-1","count":1,"size":"g3.k3s.small"},{"id":"test-pool-2","count":1,"size":"g3.k3s.small"}]}`, + ResponseBody: fmt.Sprintf(`{ + "id": "%s" + }`, testClusterExternalID), + }, + { + URL: "/v2/kubernetes/clusters/", + RequestBody: `{"name":"test-cluster","region":"TEST","instance_firewall":"fake-firewall-id"}`, + ResponseBody: fmt.Sprintf(`{ + "id": "%s" + }`, testClusterExternalID), + }, + { + URL: "/v2/kubernetes/clusters/", + RequestBody: `{"name":"test-cluster","region":"TEST","tags":"lets-add-a-test-tag"}`, + ResponseBody: fmt.Sprintf(`{ + "id": "%s" + }`, testClusterExternalID), + }, + { + URL: "/v2/kubernetes/clusters/", + RequestBody: `{"name":"test-cluster","region":"TEST","applications":"cilium istio"}`, + ResponseBody: fmt.Sprintf(`{ + "id": "%s" + }`, testClusterExternalID), + }, + }, + }, + } + civoClient, server, results, err := NewErrorClientForTesting(testCasesConfig) + if err != nil { + t.Error(err) + } + defer server.Close() + + mockExternal := 
&external{ + kube: cptest.NewMockClient(), + civoClient: civoClient, + } + + ctx := context.Background() + cr := getFakeClusterv1Alpha1() + meta.SetExternalName(cr, testClusterExternalID) // since this is an update, we already have an external ID + cr.Spec.Pools = append(cr.Spec.Pools, getFakePool(2)) + cr.Spec.Applications = []string{"cilium", "istio"} // TODO: this should remove kubernetes-dashboard + cr.Spec.Tags = []string{"lets-add-a-test-tag"} + + _, errU := mockExternal.Update(ctx, cr) + if errU != nil { + t.Error(errU) + } + + if results.Completed[0].URL.Path != "/v2/kubernetes/clusters/"+testClusterExternalID { + // make sure that the function called the GET url + t.Log("Test did not call GET url properly") + t.FailNow() + } + + for _, req := range results.Failed { + body, _ := io.ReadAll(req.Body) + t.Errorf("Could not match request %s %s\nBody: \"%s\"", + req.Method, + req.URL.String(), + body, + ) + } +} + +func TestCreate(t *testing.T) { + t.Helper() + + testCasesConfig := []ConfigErrorClientForTesting{ + { + Method: "GET", + Value: []ValueErrorClientForTesting{ + { + URL: "/v2/kubernetes/clusters", + StatusCode: 404, + ResponseBody: `{"code":"database_kubernetes_cluster_not_found"}`, + }, + }, + }, + { + Method: "POST", + Value: []ValueErrorClientForTesting{ + { + URL: "/v2/kubernetes/clusters", + StatusCode: 200, + ResponseBody: fmt.Sprintf(`{ + "id": "%s", + "name": "your-cluster-name", + "version": "2", + "status": "ACTIVE", + "ready": true, + "num_target_nodes": 1, + "target_nodes_size": "g2.xsmall", + "built_at": "2019-09-23T13:04:23.000+01:00", + "kubeconfig": "YAML_VERSION_OF_KUBECONFIG_HERE\n", + "kubernetes_version": "0.8.1", + "api_endpoint": "https://your.cluster.ip.address:6443", + "master_ip": "your.cluster.ip.address", + "dns_entry": "69a23478-a89e-41d2-97b1-6f4c341cee70.k8s.civo.com", + "tags": [], + "created_at": "2019-09-23T13:02:59.000+01:00", + "firewall_id": "test-firewall-id", + "cni_plugin": "flannel" + }`, 
testClusterExternalID), + }, + }, + }, + } + + civoClient, server, results, err := NewErrorClientForTesting(testCasesConfig) + if err != nil { + t.Error(err) + } + defer server.Close() + + mockExternal := &external{ + kube: cptest.NewMockClient(), + civoClient: civoClient, + } + + ctx := context.Background() + cr := getFakeClusterv1Alpha1() + createOp, err := mockExternal.Create(ctx, cr) + if err != nil { + t.Error(err) + } + + if createOp.ExternalNameAssigned == false { + t.Error("ExternalName was not assigned") + } + + if meta.GetExternalName(cr) != testClusterExternalID { + t.Errorf("Wrong external name, expected %s, found %s", testClusterExternalID, meta.GetExternalName(cr)) + } + + for _, req := range results.Failed { + body, _ := io.ReadAll(req.Body) + t.Errorf("Could not match request %s %s\nBody: \"%s\"", + req.Method, + req.URL.String(), + body, + ) + } +} + +type ConfigErrorClientForTesting struct { + Method string + Value []ValueErrorClientForTesting +} + +type ValueErrorClientForTesting struct { + RequestBody string + URL string + ResponseBody string + Handler func(rw http.ResponseWriter, req *http.Request) + StatusCode int +} + +type ErrorClientResults struct { + Completed []*http.Request + Failed []*http.Request +} + +// NewAdvancedClientForTesting initializes a Client connecting to a local test server +// it allows for specifying methods and records and returns all requests made +func NewErrorClientForTesting(responses []ConfigErrorClientForTesting) (*civogo.Client, *httptest.Server, *ErrorClientResults, error) { + results := &ErrorClientResults{} + + server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + var responseSent bool + + body, err := io.ReadAll(req.Body) + if err != nil { + log.Printf("Error reading body: %v", err) + return + } + + req.Body = io.NopCloser(bytes.NewBuffer(body)) + + for _, criteria := range responses { + // we check the HTTP method first + if req.Method != criteria.Method { + 
continue + } + + for _, criteria := range criteria.Value { + if !strings.HasPrefix(req.URL.Path, criteria.URL) { + // simple match on request body by prefix + continue + } + + if !strings.HasPrefix(string(body), criteria.RequestBody) { + // match on request body by prefix, so we can pass "" to match all + continue + } + + responseSent = true + + if criteria.StatusCode > 0 { + rw.WriteHeader(criteria.StatusCode) + } + rw.Write([]byte(criteria.ResponseBody)) + } + } + + if responseSent { + results.Completed = append(results.Completed, req) + } else { + results.Failed = append(results.Failed, req) + fmt.Println("failed to find a matching request") + fmt.Printf("%s %s\n", req.Method, req.URL.String()) + fmt.Println("Request Body: ", string(body)) + rw.Write([]byte(`{"result": "failed to find a matching request handler"}`)) + } + })) + + client, err := civogo.NewClientForTestingWithServer(server) + + return client, server, results, err +} diff --git a/internal/controller/civokubernetes/strings_test.go b/internal/controller/civokubernetes/strings_test.go new file mode 100644 index 0000000..aed8331 --- /dev/null +++ b/internal/controller/civokubernetes/strings_test.go @@ -0,0 +1,66 @@ +package civokubernetes + +import "testing" + +func TestStringSlicesNeedUpdate_False(t *testing.T) { + + type testCaseConfig struct { + name string + a, b []string + expect bool + } + + testCases := []testCaseConfig{ + { + name: "same-length", + a: []string{"apple", "banana", "pear"}, + b: []string{"apple", "banana", "pear"}, + expect: false, + }, + { + name: "zero-length", + a: []string{}, + b: []string{}, + expect: false, + }, + { + name: "add-element", + a: []string{"apple", "banana", "pear"}, + b: []string{"apple", "banana", "pear", "strawberry"}, + expect: true, + }, + { + name: "remove-element", + a: []string{"apple", "banana", "pear"}, + b: []string{"apple", "banana"}, + expect: true, + }, + { + name: "replace-element", + a: []string{"apple", "banana", "pear"}, + b: []string{"apple", 
"banana", "strawberry"}, + expect: true, + }, + { + name: "out-of-order-same-elements", + a: []string{"pear", "banana", "apple"}, + b: []string{"apple", "pear", "banana"}, + expect: false, + }, + { + name: "out-of-order-different-elements", + a: []string{"pear", "banana", "apple"}, + b: []string{"apple", "strawberry", "banana"}, + expect: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + result := stringSlicesNeedUpdate(testCase.a, testCase.b) + if result != testCase.expect { + t.Errorf("expected %v, actual %v", testCase.expect, result) + } + }) + } +} diff --git a/package/crds/cluster.civo.crossplane.io_civokubernetes.yaml b/package/crds/cluster.civo.crossplane.io_civokubernetes.yaml index b50b97f..5378176 100644 --- a/package/crds/cluster.civo.crossplane.io_civokubernetes.yaml +++ b/package/crds/cluster.civo.crossplane.io_civokubernetes.yaml @@ -85,8 +85,15 @@ spec: - Orphan - Delete type: string + firewallId: + type: string name: type: string + networkId: + description: 'NOTE: This can only be set at creation time. Changing + this value after creation will not move the cluster into another + network..' + type: string pools: items: description: KubernetesClusterPoolConfig is used to create a new @@ -122,6 +129,12 @@ spec: required: - name type: object + region: + type: string + tags: + items: + type: string + type: array version: default: 1.22.2-k3s1 description: 'If not set, the default kubernetes version(1.22.2-k31) @@ -151,6 +164,7 @@ spec: required: - connectionDetails - name + - networkId - pools type: object status: