diff --git a/pkg/comp-functions/functions/common/resources.go b/pkg/comp-functions/functions/common/resources.go
index bd75c5b2e4..60da872a8e 100644
--- a/pkg/comp-functions/functions/common/resources.go
+++ b/pkg/comp-functions/functions/common/resources.go
@@ -3,43 +3,133 @@ package common
 import (
     vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1"
     "github.com/vshn/appcat/v4/pkg/common/utils"
+    "k8s.io/apimachinery/pkg/api/resource"
 )
 
 type Resources struct {
-    ReqMem string
-    ReqCPU string
-    Mem    string
-    CPU    string
-    Disk   string
+    ReqMem resource.Quantity
+    ReqCPU resource.Quantity
+    Mem    resource.Quantity
+    CPU    resource.Quantity
+    Disk   resource.Quantity
 }
 
-func GetResources(size *vshnv1.VSHNSizeSpec, r utils.Resources) Resources {
-    reqMem := size.Requests.Memory
-    reqCPU := size.Requests.CPU
-    mem := size.Memory
-    cpu := size.CPU
-    disk := size.Disk
+// GetResources will return a `Resources` object with the correctly calculated requests,
+// limits and disk space according to the definitions in the plan as well as the overrides
+// in the claim.
+func GetResources(size *vshnv1.VSHNSizeSpec, plan utils.Resources) (Resources, []error) {
+    reqMem := resource.Quantity{}
+    reqCPU := resource.Quantity{}
+    mem := resource.Quantity{}
+    cpu := resource.Quantity{}
+    disk := plan.Disk
 
-    if reqMem == "" {
-        reqMem = r.MemoryRequests.String()
+    var errors []error
+    var err error
+
+    if size.Requests.Memory != "" {
+        reqMem, err = resource.ParseQuantity(size.Requests.Memory)
+        if err != nil {
+            errors = append(errors, err)
+        }
     }
-    if reqCPU == "" {
-        reqCPU = r.CPURequests.String()
+
+    if size.Requests.CPU != "" {
+        reqCPU, err = resource.ParseQuantity(size.Requests.CPU)
+        if err != nil {
+            errors = append(errors, err)
+        }
     }
-    if mem == "" {
-        mem = r.MemoryLimits.String()
+
+    if size.Memory != "" {
+        mem, err = resource.ParseQuantity(size.Memory)
+        if err != nil {
+            errors = append(errors, err)
+        }
     }
-    if cpu == "" {
-        cpu = r.CPULimits.String()
+
+    if size.CPU != "" {
+        cpu, err = resource.ParseQuantity(size.CPU)
+        if err != nil {
+            errors = append(errors, err)
+        }
     }
-    if disk == "" {
-        disk = r.Disk.String()
+
+    if size.Disk != "" {
+        disk, err = resource.ParseQuantity(size.Disk)
+        if err != nil {
+            errors = append(errors, err)
+        }
     }
+
+    memLimit := getLimit(reqMem, mem, plan.MemoryLimits)
+    memReq := getRequest(reqMem, memLimit, plan.MemoryRequests)
+
+    cpuLimit := getLimit(reqCPU, cpu, plan.CPULimits)
+    cpuReq := getRequest(reqCPU, cpuLimit, plan.CPURequests)
+
     return Resources{
-        ReqMem: reqMem,
-        ReqCPU: reqCPU,
-        Mem:    mem,
-        CPU:    cpu,
+        ReqMem: memReq,
+        ReqCPU: cpuReq,
+        Mem:    memLimit,
+        CPU:    cpuLimit,
         Disk:   disk,
+    }, errors
+}
+
+// getLimit will compare a given limit and request, as well as the limit defined in the plan,
+// and return the appropriate limit.
+// The limit is chosen based on the following logic:
+// - If the claim contains a limit but no request: use the limit from the claim as the final limit.
+// - If the claim contains a limit and a request: use the higher of both as the final limit.
+//   This avoids having higher requests than limits, which is not supported in k8s.
+// - If the claim contains only a request: use the higher of the request and the plan's limit as the final limit.
+//   This avoids having higher requests than limits, which is not supported in k8s.
+// - If neither a limit nor a request is defined in the claim, use the plan's limit as the final limit.
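+//
+// Illustrative examples (hypothetical quantities, not taken from any real plan;
+// a zero Quantity means "not set"):
+//
+//    getLimit(0, 2Gi, 1Gi)   -> 2Gi (limit only in the claim)
+//    getLimit(4Gi, 2Gi, 1Gi) -> 4Gi (request above limit, the higher value wins)
+//    getLimit(2Gi, 0, 1Gi)   -> 2Gi (request only, higher than the plan's limit)
+//    getLimit(0, 0, 1Gi)     -> 1Gi (nothing in the claim, the plan's limit wins)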
+func getLimit(req, limit, planLimit resource.Quantity) resource.Quantity {
+
+    finalLimit := resource.Quantity{}
+
+    if !limit.IsZero() && req.IsZero() {
+        finalLimit = limit
+    } else if !limit.IsZero() && !req.IsZero() {
+        finalLimit = getHigherQuantity(req, limit)
+    } else if limit.IsZero() && !req.IsZero() {
+        finalLimit = getHigherQuantity(req, planLimit)
+    } else {
+        finalLimit = planLimit
+    }
+    return finalLimit
+}
+
+// getRequest will compare a given limit and request, as well as the request defined in the plan,
+// and return the appropriate request.
+// This function assumes that the given limit has already been calculated by the `getLimit`
+// function before being passed in.
+// The request is chosen based on the following logic:
+// - If no request is defined in the claim, use the plan's request as the final request.
+// - If the claim contains a limit and a request, use the lower of both as the final request.
+//   This avoids having higher requests than limits, which is not supported in k8s.
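+//
+// Illustrative examples (hypothetical quantities; a zero Quantity means "not set"):
+//
+//    getRequest(0, 2Gi, 512Mi)   -> 512Mi (no claim request, the plan's request wins)
+//    getRequest(0, 100Mi, 512Mi) -> 100Mi (the plan's request is capped at the claim's limit)
+//    getRequest(4Gi, 4Gi, 512Mi) -> 4Gi   (claim request, already reconciled with the limit by getLimit)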
+func getRequest(req, limit, planRequests resource.Quantity) resource.Quantity {
+
+    finalReq := req
+
+    if req.IsZero() {
+        finalReq = planRequests
+    }
+
+    if limit.Cmp(finalReq) == -1 {
+        finalReq = limit
+    }
+
+    return finalReq
+}
+
+// getHigherQuantity will compare the given quantities and return the higher one.
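+// For example, getHigherQuantity(512Mi, 1Gi) returns 1Gi.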
+func getHigherQuantity(a, b resource.Quantity) resource.Quantity {
+
+    if a.Cmp(b) == -1 {
+        return b
     }
+    return a
 }
diff --git a/pkg/comp-functions/functions/common/resources_test.go b/pkg/comp-functions/functions/common/resources_test.go
new file mode 100644
index 0000000000..7579f41300
--- /dev/null
+++ b/pkg/comp-functions/functions/common/resources_test.go
@@ -0,0 +1,111 @@
+package common
+
+import (
+    "context"
+    "errors"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1"
+    "github.com/vshn/appcat/v4/pkg/common/utils"
+    "github.com/vshn/appcat/v4/pkg/comp-functions/functions/commontest"
+    "k8s.io/apimachinery/pkg/api/resource"
+)
+
+func TestResources(t *testing.T) {
+
+    ctx := context.Background()
+    svc := commontest.LoadRuntimeFromFile(t, "plans.yaml")
+    planResources, err := utils.FetchPlansFromConfig(ctx, svc, svc.Config.Data["defaultPlan"])
+    assert.NoError(t, err)
+
+    tests := []struct {
+        name           string
+        claimResources vshnv1.VSHNSizeSpec
+        expResult      Resources
+    }{
+        {
+            name:           "GivenDefaultPlanNoResources",
+            claimResources: vshnv1.VSHNSizeSpec{},
+            expResult: Resources{
+                ReqMem: planResources.MemoryRequests,
+                ReqCPU: planResources.CPURequests,
+                Mem:    planResources.MemoryLimits,
+                CPU:    planResources.CPULimits,
+                Disk:   planResources.Disk,
+            },
+        },
+        {
+            name: "GivenDefaultPlanCustomLimitHigherThanPlan",
+            claimResources: vshnv1.VSHNSizeSpec{
+                Memory: "2Gi",
+                CPU:    "500m",
+            },
+            expResult: Resources{
+                ReqMem: planResources.MemoryRequests,
+                ReqCPU: planResources.CPURequests,
+                Mem:    resource.MustParse("2Gi"),
+                CPU:    resource.MustParse("500m"),
+                Disk:   planResources.Disk,
+            },
+        },
+        {
+            name: "GivenDefaultPlanCustomMemoryLimitLowerThanPlan",
+            claimResources: vshnv1.VSHNSizeSpec{
+                Memory: "100Mi",
+                CPU:    "100m",
+            },
+            expResult: Resources{
+                ReqMem: resource.MustParse("100Mi"),
+                ReqCPU: resource.MustParse("100m"),
+                Mem:    resource.MustParse("100Mi"),
+                CPU:    resource.MustParse("100m"),
+                Disk:   planResources.Disk,
+            },
+        },
+        {
+            name: "GivenDefaultPlanCustomMemoryRequestsHigherThanPlan",
+            claimResources: vshnv1.VSHNSizeSpec{
+                Requests: vshnv1.VSHNDBaaSSizeRequestsSpec{
+                    Memory: "2Gi",
+                    CPU:    "1",
+                },
+            },
+            expResult: Resources{
+                ReqMem: resource.MustParse("2Gi"),
+                ReqCPU: resource.MustParse("1"),
+                Mem:    resource.MustParse("2Gi"),
+                CPU:    resource.MustParse("1"),
+                Disk:   planResources.Disk,
+            },
+        },
+        {
+            name: "GivenDefaultPlanCustomMemoryRequestsHigherThanLimit",
+            claimResources: vshnv1.VSHNSizeSpec{
+                Requests: vshnv1.VSHNDBaaSSizeRequestsSpec{
+                    Memory: "2Gi",
+                    CPU:    "1",
+                },
+                Memory: "500Mi",
+                CPU:    "500m",
+            },
+            expResult: Resources{
+                ReqMem: resource.MustParse("2Gi"),
+                ReqCPU: resource.MustParse("1"),
+                Mem:    resource.MustParse("2Gi"),
+                CPU:    resource.MustParse("1"),
+                Disk:   planResources.Disk,
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+
+            res, errs := GetResources(&tt.claimResources, planResources)
+            assert.NoError(t, errors.Join(errs...))
+
+            assert.Equal(t, tt.expResult, res)
+        })
+    }
+}
diff --git a/pkg/comp-functions/functions/vshnkeycloak/deploy.go b/pkg/comp-functions/functions/vshnkeycloak/deploy.go
index c95eb9469a..1804300527 100644
--- a/pkg/comp-functions/functions/vshnkeycloak/deploy.go
+++ b/pkg/comp-functions/functions/vshnkeycloak/deploy.go
@@ -4,6 +4,7 @@ import (
     "context"
     "encoding/base64"
     "encoding/json"
+    "errors"
     "fmt"
 
     "dario.cat/mergo"
@@ -225,6 +226,7 @@ func addRelease(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.V
 }
 
 func getResources(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VSHNKeycloak) (common.Resources, error) {
+    l := svc.Log
     plan := comp.Spec.Parameters.Size.GetPlan(svc.Config.Data["defaultPlan"])
 
     resources, err := utils.FetchPlansFromConfig(ctx, svc, plan)
@@ -233,7 +235,10 @@ func getResources(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1
         return common.Resources{}, err
     }
 
-    res := common.GetResources(&comp.Spec.Parameters.Size, resources)
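+    // Note: parse errors from the claim are logged but not returned as an error;
+    // GetResources falls back to the plan's values for any field that failed to parse.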
+    res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    if len(errs) != 0 {
+        l.Error(errors.Join(errs...), "Cannot get Resources from plan and claim")
+    }
 
     return res, nil
 }
@@ -411,12 +416,12 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
         },
         "resources": map[string]any{
             "requests": map[string]any{
-                "memory": res.ReqMem,
-                "cpu":    res.ReqCPU,
+                "memory": res.ReqMem.String(),
+                "cpu":    res.ReqCPU.String(),
             },
             "limits": map[string]any{
-                "memory": res.Mem,
-                "cpu":    res.CPU,
+                "memory": res.Mem.String(),
+                "cpu":    res.CPU.String(),
             },
         },
         "nodeSelector": nodeSelector,
diff --git a/pkg/comp-functions/functions/vshnmariadb/mariadb_deploy.go b/pkg/comp-functions/functions/vshnmariadb/mariadb_deploy.go
index 33f2d2e71e..a874bc9d0f 100644
--- a/pkg/comp-functions/functions/vshnmariadb/mariadb_deploy.go
+++ b/pkg/comp-functions/functions/vshnmariadb/mariadb_deploy.go
@@ -6,7 +6,11 @@ import (
     "context"
     "encoding/base64"
     "encoding/json"
+    "errors"
     "fmt"
+    "io"
+    "strconv"
+
     xfnproto "github.com/crossplane/function-sdk-go/proto/v1beta1"
     xhelmbeta1 "github.com/vshn/appcat/v4/apis/helm/release/v1beta1"
     vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1"
@@ -14,12 +18,10 @@ import (
     "github.com/vshn/appcat/v4/pkg/comp-functions/functions/common"
     "github.com/vshn/appcat/v4/pkg/comp-functions/functions/common/maintenance"
     "github.com/vshn/appcat/v4/pkg/comp-functions/runtime"
-    "io"
     corev1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
     "k8s.io/utils/ptr"
-    "strconv"
 )
 
 const (
@@ -136,7 +138,7 @@ func getConnectionDetails(comp *vshnv1.VSHNMariaDB, svc *runtime.ServiceRuntime,
 }
 
 func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VSHNMariaDB, secretName string) (map[string]interface{}, error) {
-
+    l := svc.Log
     values := map[string]interface{}{}
 
     plan := comp.Spec.Parameters.Size.GetPlan(svc.Config.Data["defaultPlan"])
@@ -147,7 +149,10 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
         return values, err
     }
 
-    res := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    if len(errs) != 0 {
+        l.Error(errors.Join(errs...), "Cannot get Resources from plan and claim")
+    }
 
     nodeSelector, err := utils.FetchNodeSelectorFromConfig(ctx, svc, plan, comp.Spec.Parameters.Scheduling.NodeSelector)
     if err != nil {
@@ -160,12 +165,12 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
         "replicaCount": 1,
         "resources": map[string]interface{}{
             "requests": map[string]interface{}{
-                "memory": res.ReqMem,
-                "cpu":    res.ReqCPU,
+                "memory": res.ReqMem.String(),
+                "cpu":    res.ReqCPU.String(),
             },
             "limits": map[string]interface{}{
-                "memory": res.Mem,
-                "cpu":    res.CPU,
+                "memory": res.Mem.String(),
+                "cpu":    res.CPU.String(),
             },
         },
         "networkPolicy": map[string]interface{}{
@@ -179,7 +184,7 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
             "certCAFilename": "ca.crt",
         },
         "persistence": map[string]interface{}{
-            "size":         res.Disk,
+            "size":         res.Disk.String(),
             "storageClass": comp.Spec.Parameters.StorageClass,
         },
         "startupProbe": map[string]interface{}{
diff --git a/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go b/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go
index 3f1b4d8810..ddc4017f0e 100644
--- a/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go
+++ b/pkg/comp-functions/functions/vshnpostgres/postgresql_deploy.go
@@ -4,6 +4,7 @@ import (
     "context"
     _ "embed"
     "encoding/json"
+    "errors"
     "fmt"
     "strings"
     "time"
@@ -182,6 +183,7 @@ func createStackgresObjects(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, sv
 }
 
 func createSgInstanceProfile(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) error {
+    l := svc.Log
     plan := comp.Spec.Parameters.Size.GetPlan(svc.Config.Data["defaultPlan"])
 
     resources, err := utils.FetchPlansFromConfig(ctx, svc, plan)
@@ -224,7 +226,10 @@ func createSgInstanceProfile(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, s
         "setDbopsRunning":        "dbops.set-dbops-running",
     }
 
-    res := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    if len(errs) != 0 {
+        l.Error(errors.Join(errs...), "Cannot get Resources from plan and claim")
+    }
 
     containersRequests := generateContainers(*containers, sideCarMap, false)
     containersRequestsBytes, err := json.Marshal(containersRequests)
@@ -255,11 +260,11 @@ func createSgInstanceProfile(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, s
             Namespace: comp.GetInstanceNamespace(),
         },
         Spec: sgv1.SGInstanceProfileSpec{
-            Cpu:    res.CPU,
-            Memory: res.Mem,
+            Cpu:    res.CPU.String(),
+            Memory: res.Mem.String(),
             Requests: &sgv1.SGInstanceProfileSpecRequests{
-                Cpu:    &res.ReqCPU,
-                Memory: &res.ReqMem,
+                Cpu:    ptr.To(res.ReqCPU.String()),
+                Memory: ptr.To(res.ReqMem.String()),
                 Containers: k8sruntime.RawExtension{
                     Raw: containersRequestsBytes,
                 },
@@ -341,6 +346,8 @@ func createSgPostgresConfig(comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRun
 
 func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runtime.ServiceRuntime) error {
 
+    l := svc.Log
+
     plan := comp.Spec.Parameters.Size.GetPlan(svc.Config.Data["defaultPlan"])
 
     resources, err := utils.FetchPlansFromConfig(ctx, svc, plan)
@@ -349,7 +356,10 @@ func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runt
         return err
     }
 
-    res := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
+    if len(errs) != 0 {
+        l.Error(errors.Join(errs...), "Cannot get Resources from plan and claim")
+    }
 
     nodeSelector, err := utils.FetchNodeSelectorFromConfig(ctx, svc, plan, comp.Spec.Parameters.Scheduling.NodeSelector)
     if err != nil {
@@ -411,7 +421,7 @@ func createSgCluster(ctx context.Context, comp *vshnv1.VSHNPostgreSQL, svc *runt
             },
             Pods: sgv1.SGClusterSpecPods{
                 PersistentVolume: sgv1.SGClusterSpecPodsPersistentVolume{
-                    Size: res.Disk,
+                    Size: res.Disk.String(),
                 },
                 Resources: &sgv1.SGClusterSpecPodsResources{
                     EnableClusterLimitsRequirements: ptr.To(true),
diff --git a/test/functions/plans.yaml b/test/functions/plans.yaml
new file mode 100644
index 0000000000..223610dc89
--- /dev/null
+++ b/test/functions/plans.yaml
@@ -0,0 +1,9 @@
+desired: {}
+input:
+  apiVersion: v1
+  data:
+    defaultPlan: standard-1
+    plans:
+      '{"standard-1": {"size": {"cpu": "250m", "disk": "16Gi", "enabled": true,
+      "memory": "1Gi"}}}'
+observed: {}