Skip to content

Commit

Permalink
Merge pull request #184 from vshn/fix/resource_with_plans
Browse files Browse the repository at this point in the history
Refactor and fix Resource calculations
  • Loading branch information
zugao authored Jul 1, 2024
2 parents 3410a63 + 2402801 commit 37e2aee
Show file tree
Hide file tree
Showing 6 changed files with 276 additions and 46 deletions.
140 changes: 115 additions & 25 deletions pkg/comp-functions/functions/common/resources.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,43 +3,133 @@ package common
import (
vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1"
"github.com/vshn/appcat/v4/pkg/common/utils"
"k8s.io/apimachinery/pkg/api/resource"
)

// Resources holds the effective, fully resolved resource requests, limits
// and disk size for a service instance, as computed by GetResources from
// the plan defaults and the claim overrides.
type Resources struct {
	ReqMem resource.Quantity
	ReqCPU resource.Quantity
	Mem    resource.Quantity
	CPU    resource.Quantity
	Disk   resource.Quantity
}

func GetResources(size *vshnv1.VSHNSizeSpec, r utils.Resources) Resources {
reqMem := size.Requests.Memory
reqCPU := size.Requests.CPU
mem := size.Memory
cpu := size.CPU
disk := size.Disk
// GetResources will return a `Resources` object with the correctly calculated requests,
// limits and disk space according to the definitions in the plan as well as the overrides
// in the claim.
func GetResources(size *vshnv1.VSHNSizeSpec, plan utils.Resources) (Resources, []error) {
reqMem := resource.Quantity{}
reqCPU := resource.Quantity{}
mem := resource.Quantity{}
cpu := resource.Quantity{}
disk := plan.Disk

if reqMem == "" {
reqMem = r.MemoryRequests.String()
var errors []error
var err error

if size.Requests.Memory != "" {
reqMem, err = resource.ParseQuantity(size.Requests.Memory)
if err != nil {
errors = append(errors, err)
}
}
if reqCPU == "" {
reqCPU = r.CPURequests.String()

if size.Requests.CPU != "" {
reqCPU, err = resource.ParseQuantity(size.Requests.CPU)
if err != nil {
errors = append(errors, err)
}
}
if mem == "" {
mem = r.MemoryLimits.String()

if size.Memory != "" {
mem, err = resource.ParseQuantity(size.Memory)
if err != nil {
errors = append(errors, err)
}
}
if cpu == "" {
cpu = r.CPULimits.String()

if size.CPU != "" {
cpu, err = resource.ParseQuantity(size.CPU)
if err != nil {
errors = append(errors, err)
}
}
if disk == "" {
disk = r.Disk.String()

if size.Disk != "" {
disk, err = resource.ParseQuantity(size.Disk)
if err != nil {
errors = append(errors, err)
}
}

memLimit := getLimit(reqMem, mem, plan.MemoryLimits)
memReq := getRequest(reqMem, memLimit, plan.MemoryRequests)

cpuLimit := getLimit(reqCPU, cpu, plan.CPULimits)
cpuReq := getRequest(reqCPU, cpuLimit, plan.CPURequests)

return Resources{
ReqMem: reqMem,
ReqCPU: reqCPU,
Mem: mem,
CPU: cpu,
ReqMem: memReq,
ReqCPU: cpuReq,
Mem: memLimit,
CPU: cpuLimit,
Disk: disk,
}, errors
}

// getLimit resolves the effective limit from the claim's request and limit
// together with the limit defined in the plan:
//   - Neither request nor limit set in the claim: the plan's limit applies.
//   - Only the limit is set: the claim's limit applies.
//   - Both are set: the higher of the two applies, so the request can never
//     exceed the limit (requests above limits are not supported in k8s).
//   - Only the request is set: the higher of the request and the plan's limit
//     applies, for the same reason.
func getLimit(req, limit, planLimit resource.Quantity) resource.Quantity {
	switch {
	case req.IsZero() && limit.IsZero():
		return planLimit
	case req.IsZero():
		return limit
	case limit.IsZero():
		return getHigherQuantity(req, planLimit)
	default:
		return getHigherQuantity(req, limit)
	}
}

// getRequest resolves the effective request from the claim's request, the
// already-resolved limit (expected to come from getLimit) and the plan's
// request:
//   - If the claim defines no request, the plan's request is used.
//   - The result is then capped at the given limit, so the request can never
//     exceed the limit (requests above limits are not supported in k8s).
func getRequest(req, limit, planRequests resource.Quantity) resource.Quantity {
	out := req
	if out.IsZero() {
		out = planRequests
	}

	// Cap the request at the limit.
	if limit.Cmp(out) < 0 {
		out = limit
	}

	return out
}

// getHigherQuantity returns the larger of the two given quantities.
func getHigherQuantity(a, b resource.Quantity) resource.Quantity {
	if b.Cmp(a) > 0 {
		return b
	}
	return a
}
111 changes: 111 additions & 0 deletions pkg/comp-functions/functions/common/resources_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
package common

import (
"context"
"errors"
"testing"

"github.com/stretchr/testify/assert"
vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1"
"github.com/vshn/appcat/v4/pkg/common/utils"
"github.com/vshn/appcat/v4/pkg/comp-functions/functions/commontest"
"k8s.io/apimachinery/pkg/api/resource"
)

// TestResources exercises GetResources against the default plan fixture:
// no overrides, limit-only overrides above and below the plan, request-only
// overrides, and requests that exceed the claimed limits.
func TestResources(t *testing.T) {

	ctx := context.Background()
	svc := commontest.LoadRuntimeFromFile(t, "plans.yaml")
	planResources, err := utils.FetchPlansFromConfig(ctx, svc, svc.Config.Data["defaultPlan"])
	assert.NoError(t, err)

	type testCase struct {
		name           string
		claimResources vshnv1.VSHNSizeSpec
		expResult      Resources
	}

	cases := []testCase{
		{
			name:           "GivenDefaultPlanNoResources",
			claimResources: vshnv1.VSHNSizeSpec{},
			expResult: Resources{
				ReqMem: planResources.MemoryRequests,
				ReqCPU: planResources.CPURequests,
				Mem:    planResources.MemoryLimits,
				CPU:    planResources.CPULimits,
				Disk:   planResources.Disk,
			},
		},
		{
			name: "GivenDefaultPlanCustomLimitHigherThanPlan",
			claimResources: vshnv1.VSHNSizeSpec{
				Memory: "2Gi",
				CPU:    "500m",
			},
			expResult: Resources{
				ReqMem: planResources.MemoryRequests,
				ReqCPU: planResources.CPURequests,
				Mem:    resource.MustParse("2Gi"),
				CPU:    resource.MustParse("500m"),
				Disk:   planResources.Disk,
			},
		},
		{
			name: "GivenDefaultPlanCustomMemoryLimitLowerThanPlan",
			claimResources: vshnv1.VSHNSizeSpec{
				Memory: "100Mi",
				CPU:    "100m",
			},
			expResult: Resources{
				ReqMem: resource.MustParse("100Mi"),
				ReqCPU: resource.MustParse("100m"),
				Mem:    resource.MustParse("100Mi"),
				CPU:    resource.MustParse("100m"),
				Disk:   planResources.Disk,
			},
		},
		{
			name: "GivenDefaultPlanCustomMemoryRequestsHigherThanPlan",
			claimResources: vshnv1.VSHNSizeSpec{
				Requests: vshnv1.VSHNDBaaSSizeRequestsSpec{
					Memory: "2Gi",
					CPU:    "1",
				},
			},
			expResult: Resources{
				ReqMem: resource.MustParse("2Gi"),
				ReqCPU: resource.MustParse("1"),
				Mem:    resource.MustParse("2Gi"),
				CPU:    resource.MustParse("1"),
				Disk:   planResources.Disk,
			},
		},
		{
			name: "GivenDefaultPlanCustomMemoryRequestsHigherThanLimit",
			claimResources: vshnv1.VSHNSizeSpec{
				Requests: vshnv1.VSHNDBaaSSizeRequestsSpec{
					Memory: "2Gi",
					CPU:    "1",
				},
				Memory: "500Mi",
				CPU:    "500m",
			},
			expResult: Resources{
				ReqMem: resource.MustParse("2Gi"),
				ReqCPU: resource.MustParse("1"),
				Mem:    resource.MustParse("2Gi"),
				CPU:    resource.MustParse("1"),
				Disk:   planResources.Disk,
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			res, errs := GetResources(&tc.claimResources, planResources)

			// No claim quantity in these cases is malformed.
			assert.NoError(t, errors.Join(errs...))
			assert.Equal(t, tc.expResult, res)
		})
	}
}
15 changes: 10 additions & 5 deletions pkg/comp-functions/functions/vshnkeycloak/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"

"dario.cat/mergo"
Expand Down Expand Up @@ -225,6 +226,7 @@ func addRelease(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.V
}

func getResources(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VSHNKeycloak) (common.Resources, error) {
l := svc.Log
plan := comp.Spec.Parameters.Size.GetPlan(svc.Config.Data["defaultPlan"])

resources, err := utils.FetchPlansFromConfig(ctx, svc, plan)
Expand All @@ -233,7 +235,10 @@ func getResources(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1
return common.Resources{}, err
}

res := common.GetResources(&comp.Spec.Parameters.Size, resources)
res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
if len(errs) != 0 {
l.Error(errors.Join(errs...), "Cannot get Resources from plan and claim")
}

return res, nil
}
Expand Down Expand Up @@ -411,12 +416,12 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
},
"resources": map[string]any{
"requests": map[string]any{
"memory": res.ReqMem,
"cpu": res.ReqCPU,
"memory": res.ReqMem.String(),
"cpu": res.ReqCPU.String(),
},
"limits": map[string]any{
"memory": res.Mem,
"cpu": res.CPU,
"memory": res.Mem.String(),
"cpu": res.CPU.String(),
},
},
"nodeSelector": nodeSelector,
Expand Down
23 changes: 14 additions & 9 deletions pkg/comp-functions/functions/vshnmariadb/mariadb_deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,20 +6,22 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"strconv"

xfnproto "github.com/crossplane/function-sdk-go/proto/v1beta1"
xhelmbeta1 "github.com/vshn/appcat/v4/apis/helm/release/v1beta1"
vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1"
"github.com/vshn/appcat/v4/pkg/common/utils"
"github.com/vshn/appcat/v4/pkg/comp-functions/functions/common"
"github.com/vshn/appcat/v4/pkg/comp-functions/functions/common/maintenance"
"github.com/vshn/appcat/v4/pkg/comp-functions/runtime"
"io"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/utils/ptr"
"strconv"
)

const (
Expand Down Expand Up @@ -136,7 +138,7 @@ func getConnectionDetails(comp *vshnv1.VSHNMariaDB, svc *runtime.ServiceRuntime,
}

func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VSHNMariaDB, secretName string) (map[string]interface{}, error) {

l := svc.Log
values := map[string]interface{}{}

plan := comp.Spec.Parameters.Size.GetPlan(svc.Config.Data["defaultPlan"])
Expand All @@ -147,7 +149,10 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
return values, err
}

res := common.GetResources(&comp.Spec.Parameters.Size, resources)
res, errs := common.GetResources(&comp.Spec.Parameters.Size, resources)
if len(errs) != 0 {
l.Error(errors.Join(errs...), "Cannot get Resources from plan and claim")
}
nodeSelector, err := utils.FetchNodeSelectorFromConfig(ctx, svc, plan, comp.Spec.Parameters.Scheduling.NodeSelector)

if err != nil {
Expand All @@ -160,12 +165,12 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
"replicaCount": 1,
"resources": map[string]interface{}{
"requests": map[string]interface{}{
"memory": res.ReqMem,
"cpu": res.ReqCPU,
"memory": res.ReqMem.String(),
"cpu": res.ReqCPU.String(),
},
"limits": map[string]interface{}{
"memory": res.Mem,
"cpu": res.CPU,
"memory": res.Mem.String(),
"cpu": res.CPU.String(),
},
},
"networkPolicy": map[string]interface{}{
Expand All @@ -179,7 +184,7 @@ func newValues(ctx context.Context, svc *runtime.ServiceRuntime, comp *vshnv1.VS
"certCAFilename": "ca.crt",
},
"persistence": map[string]interface{}{
"size": res.Disk,
"size": res.Disk.String(),
"storageClass": comp.Spec.Parameters.StorageClass,
},
"startupProbe": map[string]interface{}{
Expand Down
Loading

0 comments on commit 37e2aee

Please sign in to comment.