From df39422950c4fe91705cf0ace84fce52798af35a Mon Sep 17 00:00:00 2001
From: Nicolas Bigler
Date: Tue, 19 Sep 2023 17:39:23 +0200
Subject: [PATCH 1/3] Fix resource calculation by taking sidecars into account

Signed-off-by: Nicolas Bigler
---
 pkg/common/quotas/quotas.go                   |  19 +-
 pkg/common/quotas/quotas_test.go              |  31 ++-
 pkg/common/utils/resources.go                 |  78 +++++--
 pkg/common/utils/sidecars.go                  | 112 ++++++++++
 pkg/common/utils/sidecars_test.go             | 193 ++++++++++++++++++
 .../functions/common/namespace-quotas.go      |  17 +-
 pkg/controller/webhooks/postgresql.go         |  15 ++
 pkg/controller/webhooks/postgresql_test.go    | 103 +++++++++-
 pkg/controller/webhooks/redis.go              |   4 +
 test/transforms/common/quotas/01_default.yaml |  21 ++
 10 files changed, 560 insertions(+), 33 deletions(-)
 create mode 100644 pkg/common/utils/sidecars.go
 create mode 100644 pkg/common/utils/sidecars_test.go

diff --git a/pkg/common/quotas/quotas.go b/pkg/common/quotas/quotas.go
index 5d07c40ad1..9ab81509c1 100644
--- a/pkg/common/quotas/quotas.go
+++ b/pkg/common/quotas/quotas.go
@@ -6,6 +6,7 @@ import (
 	"strconv"
 
 	"github.com/vshn/appcat/v4/pkg/common/utils"
+	iofruntime "github.com/vshn/appcat/v4/pkg/comp-functions/runtime"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
@@ -25,12 +26,13 @@ type QuotaChecker struct {
 	gr                  schema.GroupResource
 	gk                  schema.GroupKind
 	checkNamespaceQuota bool
+	instances           int64
 }
 
 // NewQuotaChecker creates a new quota checker.
 // checkNamespaceQuota specifies whether or not the checker should also take the amount of existing
 // namespaces into account, or not.
-func NewQuotaChecker(c client.Client, claimName, claimNamespace, instanceNamespace string, r utils.Resources, gr schema.GroupResource, gk schema.GroupKind, checkNamespaceQuota bool) QuotaChecker {
+func NewQuotaChecker(c client.Client, claimName, claimNamespace, instanceNamespace string, r utils.Resources, gr schema.GroupResource, gk schema.GroupKind, checkNamespaceQuota bool, instances int64) QuotaChecker {
 	return QuotaChecker{
 		requestedResources:  r,
 		instanceNamespace:   instanceNamespace,
@@ -40,6 +42,7 @@ func NewQuotaChecker(c client.Client, claimName, claimNamespace, instanceNamespa
 		gr:                  gr,
 		gk:                  gk,
 		checkNamespaceQuota: checkNamespaceQuota,
+		instances:           instances,
 	}
 }
 
@@ -60,7 +63,7 @@ func (q *QuotaChecker) CheckQuotas(ctx context.Context) *apierrors.StatusError {
 		return nsErr
 	}
 
-	return q.requestedResources.CheckResourcesAgainstQuotas(ctx, q.client, q.claimName, q.instanceNamespace, q.gk)
+	return q.requestedResources.CheckResourcesAgainstQuotas(ctx, q.client, q.claimName, q.instanceNamespace, q.gk, q.instances)
 }
 
 // areNamespacesWithinQuota checks for the given organization if there are still enough namespaces available.
@@ -148,7 +151,7 @@ func (q *QuotaChecker) getNamespaceOverrides(ctx context.Context, c client.Clien
 // AddInitalNamespaceQuotas will add the default quotas to the namespace annotations.
 // It will only add them, if there are currently no such annotations in place.
-func AddInitalNamespaceQuotas(ns *corev1.Namespace) bool { +func AddInitalNamespaceQuotas(ctx context.Context, iof *iofruntime.Runtime, ns *corev1.Namespace, s *utils.Sidecars, kind string) bool { annotations := ns.GetAnnotations() if annotations == nil { annotations = map[string]string{} @@ -156,28 +159,30 @@ func AddInitalNamespaceQuotas(ns *corev1.Namespace) bool { added := false + r := utils.GetDefaultResources(kind, s) + if _, ok := annotations[utils.DiskAnnotation]; !ok { annotations[utils.DiskAnnotation] = utils.DefaultDiskRequests.String() added = true } if _, ok := annotations[utils.CpuRequestAnnotation]; !ok { - annotations[utils.CpuRequestAnnotation] = utils.DefaultCPURequests.String() + annotations[utils.CpuRequestAnnotation] = r.CPURequests.String() added = true } if _, ok := annotations[utils.CpuLimitAnnotation]; !ok { - annotations[utils.CpuLimitAnnotation] = utils.DefaultCPULimit.String() + annotations[utils.CpuLimitAnnotation] = r.CPULimits.String() added = true } if _, ok := annotations[utils.MemoryRequestAnnotation]; !ok { - annotations[utils.MemoryRequestAnnotation] = utils.DefaultMemoryRequests.String() + annotations[utils.MemoryRequestAnnotation] = r.MemoryRequests.String() added = true } if _, ok := annotations[utils.MemoryLimitAnnotation]; !ok { - annotations[utils.MemoryLimitAnnotation] = utils.DefaultMemoryLimits.String() + annotations[utils.MemoryLimitAnnotation] = r.MemoryLimits.String() added = true } diff --git a/pkg/common/quotas/quotas_test.go b/pkg/common/quotas/quotas_test.go index c87bb242c8..74baff669e 100644 --- a/pkg/common/quotas/quotas_test.go +++ b/pkg/common/quotas/quotas_test.go @@ -7,8 +7,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/vshn/appcat/v4/apis/metadata" "github.com/vshn/appcat/v4/pkg" "github.com/vshn/appcat/v4/pkg/common/utils" + "github.com/vshn/appcat/v4/pkg/comp-functions/functions/commontest" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,6 +40,7 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { instanceNS *corev1.Namespace dummyNs int NSOverrides int + instances int64 }{ { name: "GivenNoInstanceNamespace_WhenCheckingAgainstDefault_ThenNoError", @@ -48,6 +51,7 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { MemoryRequests: *resource.NewQuantity(1811939328, resource.BinarySI), MemoryLimits: *resource.NewQuantity(1811939328, resource.BinarySI), }, + instances: 1, }, { name: "GivenNotInstancenamespace_WhenCheckingAgainstDefault_ThenError", @@ -58,7 +62,8 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { MemoryRequests: *resource.NewQuantity(1811939328, resource.BinarySI), MemoryLimits: *resource.NewQuantity(1811939328, resource.BinarySI), }, - wantErr: true, + instances: 1, + wantErr: true, }, { name: "GivenInstancenamespace_WhenCheckingAgainst_ThenNoError", @@ -77,6 +82,7 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { }, }, }, + instances: 1, }, { name: "GivenInstancenamespace_WhenCheckingAgainst_ThenError", @@ -95,7 +101,8 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { }, }, }, - wantErr: true, + instances: 1, + wantErr: true, }, { name: "GivenTooManyNamespaces_ThenError", @@ -106,8 +113,9 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { MemoryRequests: *resource.NewQuantity(1811939328, resource.BinarySI), MemoryLimits: *resource.NewQuantity(1811939328, resource.BinarySI), }, - wantErr: true, - dummyNs: 30, + instances: 1, + wantErr: true, + dummyNs: 30, }, { name: "GivenNamespaceOverride_ThenNoError", 
@@ -118,6 +126,7 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { MemoryRequests: *resource.NewQuantity(1811939328, resource.BinarySI), MemoryLimits: *resource.NewQuantity(1811939328, resource.BinarySI), }, + instances: 1, dummyNs: 30, NSOverrides: 35, }, @@ -130,8 +139,16 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { WithScheme(pkg.SetupScheme()). WithObjects(ns).Build() + iof := commontest.LoadRuntimeFromFile(t, "common/quotas/01_default.yaml") + objectMeta := &metadata.MetadataOnlyObject{} + + err := iof.Desired.GetComposite(ctx, objectMeta) + assert.NoError(t, err) + if tt.instanceNS != nil { - AddInitalNamespaceQuotas(ns) + s := &utils.Sidecars{} + assert.NoError(t, err) + AddInitalNamespaceQuotas(ctx, iof, ns, s, gk.Kind) assert.NoError(t, fclient.Create(ctx, tt.instanceNS)) } @@ -165,8 +182,8 @@ func TestQuotaChecker_CheckQuotas(t *testing.T) { } //when - checker := NewQuotaChecker(fclient, "mytest", "myns", "testns", tt.requested, gr, gk, true) - err := checker.CheckQuotas(ctx) + checker := NewQuotaChecker(fclient, "mytest", "myns", "testns", tt.requested, gr, gk, true, tt.instances) + err = checker.CheckQuotas(ctx) //Then // Unfortunately assert.Error and assert.NoError give us false positives here. diff --git a/pkg/common/utils/resources.go b/pkg/common/utils/resources.go index 0b821b3b9a..29e3aaac86 100644 --- a/pkg/common/utils/resources.go +++ b/pkg/common/utils/resources.go @@ -3,6 +3,7 @@ package utils import ( "context" "fmt" + "strings" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -76,7 +77,7 @@ var ( // defaultCPURequests 2* standard-8 will request 4 CPUs. This default has 500m as spare for jobs DefaultCPURequests = resource.NewMilliQuantity(4500, resource.DecimalSI) // defaultCPULimit by default same as DefaultCPURequests - DefaultCPULimit = DefaultCPURequests + DefaultCPULimits = DefaultCPURequests // defaultMemoryRequests 2* standard-8 will request 16Gb. This default has 500mb as spare for jobs DefaultMemoryRequests = resource.NewQuantity(17301504000, resource.BinarySI) // defaultMemoryLimits same as DefaultMemoryRequests @@ -91,9 +92,10 @@ var ( // The second case is usually triggered if a new instance is created, as we don't have a // namespace to check against. // Once the namespace exists, the composition should ensure that the annotations are set. 
-func (r *Resources) CheckResourcesAgainstQuotas(ctx context.Context, c client.Client, claimName, instanceNamespace string, gk schema.GroupKind) *apierrors.StatusError { +func (r *Resources) CheckResourcesAgainstQuotas(ctx context.Context, c client.Client, claimName, instanceNamespace string, gk schema.GroupKind, instances int64) *apierrors.StatusError { - nsQuotas := Resources{} + var nsQuotas Resources + s := &Sidecars{} if instanceNamespace != "" { q, err := r.getNamespaceQuotas(ctx, c, instanceNamespace) if err != nil { @@ -102,9 +104,18 @@ func (r *Resources) CheckResourcesAgainstQuotas(ctx context.Context, c client.Cl } } nsQuotas = q + } else { + if gk.Kind == "VSHNPostgreSQL" { + var err error + s, err = FetchSidecarsFromCluster(ctx, c, "vshnpostgresqlplans") + if err != nil { + return apierrors.NewInternalError(err) + } + } + nsQuotas = *GetDefaultResources(gk.Kind, s) } - quotaErrs := r.checkAgainstResources(nsQuotas) + quotaErrs := r.checkAgainstResources(nsQuotas, gk.Kind, s) if len(quotaErrs) == 0 { return nil @@ -181,46 +192,49 @@ func (r *Resources) getNamespaceQuotas(ctx context.Context, c client.Client, ins // checkAgainstResources compares this resources against the given resources. // Any non-populated fields are checked against their defaults. -func (r *Resources) checkAgainstResources(quotaResources Resources) field.ErrorList { +func (r *Resources) checkAgainstResources(quotaResources Resources, kind string, s *Sidecars) field.ErrorList { foundErrs := field.ErrorList{} - errCPURequests := field.Forbidden(r.CPURequestsPath, "Max allowed CPU requests: "+DefaultCPURequests.String()+". Configured requests: "+r.CPURequests.String()+". "+contactSupportMessage) + rDef := GetDefaultResources(kind, s) + + errCPURequests := field.Forbidden(r.CPURequestsPath, "Max allowed CPU requests: "+quotaResources.CPURequests.String()+". Configured requests: "+r.CPURequests.String()+". "+contactSupportMessage) + if !quotaResources.CPURequests.IsZero() { if r.CPURequests.Cmp(quotaResources.CPURequests) == 1 { foundErrs = append(foundErrs, errCPURequests) } - } else if r.CPURequests.Cmp(*DefaultCPURequests) == 1 { + } else if r.CPURequests.Cmp(rDef.CPURequests) == 1 { foundErrs = append(foundErrs, errCPURequests) } - errCPULimits := field.Forbidden(r.CPULimitsPath, "Max allowed CPU limits: "+DefaultCPULimit.String()+". Configured limits: "+r.CPULimits.String()+". "+contactSupportMessage) + errCPULimits := field.Forbidden(r.CPULimitsPath, "Max allowed CPU limits: "+quotaResources.CPULimits.String()+". Configured limits: "+r.CPULimits.String()+". "+contactSupportMessage) if !quotaResources.CPULimits.IsZero() { if r.CPULimits.Cmp(quotaResources.CPULimits) == 1 { foundErrs = append(foundErrs, errCPULimits) } - } else if r.CPULimits.Cmp(*DefaultCPULimit) == 1 { + } else if r.CPULimits.Cmp(rDef.CPULimits) == 1 { foundErrs = append(foundErrs, errCPULimits) } - errMemoryRequests := field.Forbidden(r.MemoryRequestsPath, "Max allowed Memory requests: "+DefaultMemoryRequests.String()+". Configured requests: "+r.MemoryRequests.String()+". "+contactSupportMessage) + errMemoryRequests := field.Forbidden(r.MemoryRequestsPath, "Max allowed Memory requests: "+quotaResources.MemoryRequests.String()+". Configured requests: "+r.MemoryRequests.String()+". 
"+contactSupportMessage) if !quotaResources.MemoryRequests.IsZero() { if r.MemoryRequests.Cmp(quotaResources.MemoryRequests) == 1 { foundErrs = append(foundErrs, errMemoryRequests) } - } else if r.MemoryRequests.Cmp(*DefaultMemoryRequests) == 1 { + } else if r.MemoryRequests.Cmp(rDef.MemoryRequests) == 1 { foundErrs = append(foundErrs, errMemoryRequests) } - errMemoryLimits := field.Forbidden(r.MemoryLimitsPath, "Max allowed Memory limits: "+DefaultMemoryLimits.String()+". Configured limits: "+r.MemoryLimits.String()+". "+contactSupportMessage) + errMemoryLimits := field.Forbidden(r.MemoryLimitsPath, "Max allowed Memory limits: "+quotaResources.MemoryLimits.String()+". Configured limits: "+r.MemoryLimits.String()+". "+contactSupportMessage) if !quotaResources.MemoryLimits.IsZero() { if r.MemoryLimits.Cmp(quotaResources.MemoryLimits) == 1 { foundErrs = append(foundErrs, errMemoryLimits) } - } else if r.MemoryLimits.Cmp(*DefaultMemoryLimits) == 1 { + } else if r.MemoryLimits.Cmp(rDef.MemoryLimits) == 1 { foundErrs = append(foundErrs, errMemoryLimits) } - errDisk := field.Forbidden(r.DiskPath, "Max allowed Disk: "+DefaultCPULimit.String()+". Configured requests: "+r.Disk.String()+". "+contactSupportMessage) + errDisk := field.Forbidden(r.DiskPath, "Max allowed Disk: "+quotaResources.Disk.String()+". Configured requests: "+r.Disk.String()+". "+contactSupportMessage) if !quotaResources.Disk.IsZero() { if r.Disk.Cmp(quotaResources.Disk) == 1 { foundErrs = append(foundErrs, errDisk) @@ -248,3 +262,39 @@ func (r *Resources) MultiplyBy(i int64) { r.MemoryRequests.Set(r.MemoryRequests.Value() * i) r.Disk.Set(r.Disk.Value() * i) } + +func (r *Resources) AddResources(resource Resources) { + r.CPULimits.Add(resource.CPULimits) + r.CPURequests.Add(resource.CPURequests) + r.MemoryLimits.Add(resource.MemoryLimits) + r.MemoryRequests.Add(resource.MemoryRequests) + r.Disk.Add(resource.Disk) +} + +// AddPsqlSidecarResources adds the resource overhead for the PostgreSQL sidecar to the given resource. +func (r *Resources) AddPsqlSidecarResources(s *Sidecars, instances int64) { + resourcesSidecars, err := GetAllSideCarsResources(s) + if err != nil { + instances = 1 + } + r.CPURequests.Add(*resource.NewMilliQuantity(resourcesSidecars.CPURequests.MilliValue()*instances, resource.DecimalSI)) + r.MemoryRequests.Add(*resource.NewQuantity(resourcesSidecars.MemoryRequests.Value()*instances, resource.BinarySI)) + + // Sidecar limits: 4800m CPU, 6144Mi Memory + r.CPULimits.Add(*resource.NewMilliQuantity(resourcesSidecars.CPULimits.MilliValue()*instances, resource.DecimalSI)) + r.MemoryLimits.Add(*resource.NewQuantity(resourcesSidecars.MemoryLimits.Value()*instances, resource.BinarySI)) +} + +// GetDefaultResources returns a new Resources struct with the default values. 
+func GetDefaultResources(kind string, s *Sidecars) *Resources {
+	r := &Resources{
+		CPURequests:    *DefaultCPURequests,
+		CPULimits:      *DefaultCPULimits,
+		MemoryRequests: *DefaultMemoryRequests,
+		MemoryLimits:   *DefaultMemoryLimits,
+	}
+	if strings.Contains(kind, "VSHNPostgreSQL") {
+		r.AddPsqlSidecarResources(s, int64(2))
+	}
+	return r
+}
diff --git a/pkg/common/utils/sidecars.go b/pkg/common/utils/sidecars.go
new file mode 100644
index 0000000000..7c01e8795d
--- /dev/null
+++ b/pkg/common/utils/sidecars.go
@@ -0,0 +1,112 @@
+package utils
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/spf13/viper"
+	"github.com/vshn/appcat/v4/pkg/comp-functions/runtime"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type Sidecars map[string]sidecar
+
+type sidecar struct {
+	Limits struct {
+		CPU    string `json:"cpu"`
+		Memory string `json:"memory"`
+	} `json:"limits"`
+	Requests struct {
+		CPU    string `json:"cpu"`
+		Memory string `json:"memory"`
+	} `json:"requests"`
+}
+
+func GetAllSideCarsResources(s *Sidecars) (Resources, error) {
+
+	rTot := Resources{}
+
+	for sidecar := range *s {
+		r, err := s.convertSidecarToResource(sidecar)
+		if err != nil {
+			return Resources{}, err
+		}
+		rTot.AddResources(r)
+	}
+
+	return rTot, nil
+}
+func FetchSidecarsFromCluster(ctx context.Context, c client.Client, name string) (*Sidecars, error) {
+	s := &Sidecars{}
+	cm := &corev1.ConfigMap{}
+
+	ns := viper.GetString("PLANS_NAMESPACE")
+	key := client.ObjectKey{Name: name, Namespace: ns}
+	err := c.Get(ctx, key, cm)
+
+	if err != nil {
+		return &Sidecars{}, err
+	}
+
+	err = json.Unmarshal([]byte(cm.Data["sideCars"]), s)
+	if err != nil {
+		return &Sidecars{}, err
+	}
+
+	return s, nil
+}
+
+func FetchSidecarsFromConfig(ctx context.Context, iof *runtime.Runtime) (*Sidecars, error) {
+	s := &Sidecars{}
+
+	err := json.Unmarshal([]byte(iof.Config.Data["sideCars"]), s)
+	if err != nil {
+		return &Sidecars{}, err
+	}
+
+	return s, nil
+}
+
+// FetchSidecarFromCluster will fetch the specified sidecar from the current PLANS_NAMESPACE namespace and parse it into Resources.
+// By default PLANS_NAMESPACE should be the same namespace where the controller pod is running.
+func FetchSidecarFromCluster(ctx context.Context, c client.Client, name, sidecar string) (Resources, error) { + s, err := FetchSidecarsFromCluster(ctx, c, name) + if err != nil { + return Resources{}, err + } + + r, err := s.convertSidecarToResource(sidecar) + + return r, err +} + +func (s Sidecars) convertSidecarToResource(sidecar string) (Resources, error) { + r := Resources{} + var err error + + sideCar := s[sidecar] + + r.CPURequests, err = resource.ParseQuantity(sideCar.Requests.CPU) + if err != nil { + return Resources{}, err + } + + r.CPULimits, err = resource.ParseQuantity(sideCar.Limits.CPU) + if err != nil { + return Resources{}, err + } + + r.MemoryRequests, err = resource.ParseQuantity(sideCar.Requests.Memory) + if err != nil { + return Resources{}, err + } + + r.MemoryLimits, err = resource.ParseQuantity(sideCar.Limits.Memory) + if err != nil { + return Resources{}, err + } + + return r, nil +} diff --git a/pkg/common/utils/sidecars_test.go b/pkg/common/utils/sidecars_test.go new file mode 100644 index 0000000000..72c1dec118 --- /dev/null +++ b/pkg/common/utils/sidecars_test.go @@ -0,0 +1,193 @@ +package utils + +import ( + "context" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/vshn/appcat/v4/pkg" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFetchSidecarsFromCluster(t *testing.T) { + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "testns", + }, + Data: map[string]string{ + "sideCars": ` + { + "clusterController": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "32m", + "memory": "188Mi" + } + }, + "createBackup": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + }, + "envoy": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "32m", + "memory": "64Mi" + } + }, + "pgbouncer": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "16m", + "memory": "32Mi" + } + }, + "postgresUtil": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "10m", + "memory": "4Mi" + } + }, + "prometheusPostgresExporter": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "10m", + "memory": "32Mi" + } + }, + "runDbops": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + }, + "setDbopsResult": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + } + } + `, + }, + } + + type args struct { + name string + sideCar string + ns string + } + tests := []struct { + name string + args args + want Resources + wantErr bool + }{ + { + name: "GivenWrongNs_ThenGetError", + args: args{ + name: "test", + sideCar: "clusterController", + ns: "wrong", + }, + want: Resources{}, + wantErr: true, + }, + { + name: "GivenWrongName_ThenGetError", + args: args{ + name: "wrong", + sideCar: "clusterController", + ns: "testns", + }, + want: Resources{}, + wantErr: true, + }, + { + name: "GivenWrongSideCar_ThenGetError", + args: args{ + name: "test", + sideCar: "wrong", + ns: "testns", + }, + want: Resources{}, + wantErr: true, + }, + { + name: "GivenCorrectSideCar_ThenGetCorrectResources", + args: args{ + name: "test", + sideCar: "clusterController", + ns: 
"testns", + }, + want: Resources{ + CPURequests: *resource.NewMilliQuantity(32, resource.DecimalSI), + CPULimits: *resource.NewMilliQuantity(600, resource.DecimalSI), + Disk: *resource.NewQuantity(0, resource.BinarySI), + MemoryRequests: *resource.NewQuantity(197132288, resource.BinarySI), + MemoryLimits: *resource.NewQuantity(805306368, resource.BinarySI), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.TODO() + fclient := fake.NewClientBuilder().WithScheme(pkg.SetupScheme()). + WithObjects(cm).Build() + viper.Set("PLANS_NAMESPACE", tt.args.ns) + viper.AutomaticEnv() + + got, err := FetchSidecarFromCluster(ctx, fclient, tt.args.name, tt.args.sideCar) + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assertEqualRessource(t, tt.want, got) + s, err := FetchSidecarsFromCluster(ctx, fclient, "test") + assert.NoError(t, err) + r, err := GetAllSideCarsResources(s) + assert.NoError(t, err) + assert.Equal(t, r.CPULimits.MilliValue(), resource.NewMilliQuantity(4800, resource.DecimalSI).MilliValue()) + assert.Equal(t, r.CPURequests.MilliValue(), resource.NewMilliQuantity(850, resource.DecimalSI).MilliValue()) + assert.Equal(t, r.MemoryLimits.Value(), resource.NewQuantity(6442450944, resource.BinarySI).Value()) + assert.Equal(t, r.MemoryRequests.Value(), resource.NewQuantity(1140850688, resource.BinarySI).Value()) + } + }) + } +} diff --git a/pkg/comp-functions/functions/common/namespace-quotas.go b/pkg/comp-functions/functions/common/namespace-quotas.go index 3d3ca00c98..1fadb0349b 100644 --- a/pkg/comp-functions/functions/common/namespace-quotas.go +++ b/pkg/comp-functions/functions/common/namespace-quotas.go @@ -2,6 +2,7 @@ package common import ( "context" + "github.com/vshn/appcat/v4/apis/metadata" "github.com/vshn/appcat/v4/pkg/common/quotas" "github.com/vshn/appcat/v4/pkg/common/utils" @@ -33,6 +34,13 @@ func AddInitialNamespaceQuotas(namespaceKon string) func(context.Context, *runti } orgAdded := false + objectMeta := &metadata.MetadataOnlyObject{} + + err = iof.Desired.GetComposite(ctx, objectMeta) + if err != nil { + return runtime.NewFatalErr(ctx, "cannot get composite meta", err) + } + if value, ok := ns.GetLabels()[utils.OrgLabelName]; !ok || value == "" { objectMeta := &metadata.MetadataOnlyObject{} @@ -49,9 +57,14 @@ func AddInitialNamespaceQuotas(namespaceKon string) func(context.Context, *runti orgAdded = true } + s, err := utils.FetchSidecarsFromConfig(ctx, iof) + if err != nil { + s = &utils.Sidecars{} + } + // We only act if either the quotas were missing or the organization label is not on the - // namespace. Otherwise, we ignore updates. This is to prevent any unwanted overwriting. - if quotas.AddInitalNamespaceQuotas(ns) || orgAdded { + // namespace. Otherwise we ignore updates. This is to prevent any unwanted overwriting. 
+ if quotas.AddInitalNamespaceQuotas(ctx, iof, ns, s, objectMeta.TypeMeta.Kind) || orgAdded { err = iof.Desired.PutIntoObject(ctx, ns, namespaceKon) if err != nil { return runtime.NewFatalErr(ctx, "cannot save namespace quotas", err) diff --git a/pkg/controller/webhooks/postgresql.go b/pkg/controller/webhooks/postgresql.go index 49dd431237..81812a9168 100644 --- a/pkg/controller/webhooks/postgresql.go +++ b/pkg/controller/webhooks/postgresql.go @@ -102,6 +102,10 @@ func (p *PostgreSQLWebhookHandler) ValidateUpdate(ctx context.Context, oldObj, n return nil, fmt.Errorf("provided manifest is not a valid VSHNPostgreSQL object") } + if pg.DeletionTimestamp != nil { + return nil, nil + } + if p.withQuota { quotaErrs, fieldErrs := p.checkPostgreSQLQuotas(ctx, pg, false) if quotaErrs != nil { @@ -152,6 +156,15 @@ func (p *PostgreSQLWebhookHandler) checkPostgreSQLQuotas(ctx context.Context, pg return apierrors.NewInternalError(err), fieldErrs } } + s, err := utils.FetchSidecarsFromCluster(ctx, p.client, "vshnpostgresqlplans") + if err != nil { + return apierrors.NewInternalError(err), fieldErrs + } + + resourcesSidecars, err := utils.GetAllSideCarsResources(s) + if err != nil { + return apierrors.NewInternalError(err), fieldErrs + } p.addPathsToResources(&resources) @@ -190,6 +203,7 @@ func (p *PostgreSQLWebhookHandler) checkPostgreSQLQuotas(ctx context.Context, pg } } + resources.AddResources(resourcesSidecars) resources.MultiplyBy(instances) checker := quotas.NewQuotaChecker( @@ -201,6 +215,7 @@ func (p *PostgreSQLWebhookHandler) checkPostgreSQLQuotas(ctx context.Context, pg pgGR, pgGK, checkNamespaceQuota, + instances, ) return checker.CheckQuotas(ctx), fieldErrs diff --git a/pkg/controller/webhooks/postgresql_test.go b/pkg/controller/webhooks/postgresql_test.go index 818de8089f..e6c7a655eb 100644 --- a/pkg/controller/webhooks/postgresql_test.go +++ b/pkg/controller/webhooks/postgresql_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/go-logr/logr" + "github.com/spf13/viper" "github.com/stretchr/testify/assert" vshnv1 "github.com/vshn/appcat/v4/apis/vshn/v1" "github.com/vshn/appcat/v4/pkg" @@ -24,14 +25,109 @@ func TestPostgreSQLWebhookHandler_ValidateCreate(t *testing.T) { }, }, } + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vshnpostgresqlplans", + Namespace: "testns", + }, + Data: map[string]string{ + "sideCars": ` + { + "clusterController": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "32m", + "memory": "188Mi" + } + }, + "createBackup": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + }, + "envoy": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "32m", + "memory": "64Mi" + } + }, + "pgbouncer": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "16m", + "memory": "32Mi" + } + }, + "postgresUtil": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "10m", + "memory": "4Mi" + } + }, + "prometheusPostgresExporter": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "10m", + "memory": "32Mi" + } + }, + "runDbops": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + }, + "setDbopsResult": { + "limits": { + "cpu": "600m", + "memory": "768Mi" + }, + "requests": { + "cpu": "250m", + "memory": "256Mi" + } + } + } + `, + }, + } ctx := context.TODO() fclient := 
fake.NewClientBuilder(). WithScheme(pkg.SetupScheme()). - WithObjects(claimNS). + WithObjects(claimNS, cm). Build() + viper.Set("PLANS_NAMESPACE", "testns") + viper.AutomaticEnv() + handler := PostgreSQLWebhookHandler{ client: fclient, log: logr.Discard(), @@ -49,6 +145,7 @@ func TestPostgreSQLWebhookHandler_ValidateCreate(t *testing.T) { CPU: "500m", Memory: "1Gi", }, + Instances: 1, }, }, } @@ -62,13 +159,13 @@ func TestPostgreSQLWebhookHandler_ValidateCreate(t *testing.T) { //When quota breached // CPU Limits pgInvalid := pgOrig.DeepCopy() - pgInvalid.Spec.Parameters.Size.CPU = "5000m" + pgInvalid.Spec.Parameters.Size.CPU = "15000m" _, err = handler.ValidateCreate(ctx, pgInvalid) assert.Error(t, err) //CPU Requests pgInvalid = pgOrig.DeepCopy() - pgInvalid.Spec.Parameters.Size.Requests.CPU = "5000m" + pgInvalid.Spec.Parameters.Size.Requests.CPU = "6500m" _, err = handler.ValidateCreate(ctx, pgInvalid) assert.Error(t, err) diff --git a/pkg/controller/webhooks/redis.go b/pkg/controller/webhooks/redis.go index 6f5a59d631..5766085a13 100644 --- a/pkg/controller/webhooks/redis.go +++ b/pkg/controller/webhooks/redis.go @@ -71,6 +71,10 @@ func (r *RedisWebhookHandler) ValidateUpdate(ctx context.Context, oldObj, newObj return nil, fmt.Errorf("Provided manifest is not a valid VSHNRedis object") } + if redis.DeletionTimestamp != nil { + return nil, nil + } + if r.withQuota { err := r.checkQuotas(ctx, redis, false) if err != nil { diff --git a/test/transforms/common/quotas/01_default.yaml b/test/transforms/common/quotas/01_default.yaml index f26e28a5b8..18d6343b11 100644 --- a/test/transforms/common/quotas/01_default.yaml +++ b/test/transforms/common/quotas/01_default.yaml @@ -28,6 +28,27 @@ observed: name: myns data: desired: + composite: + resource: + apiVersion: vshn.appcat.vshn.io/v1 + kind: XVSHNPostgreSQL + metadata: + creationTimestamp: "2023-03-21T16:52:31Z" + finalizers: + - composite.apiextensions.crossplane.io + generateName: pgsql- + generation: 13 + labels: + appuio.io/organization: vshn + crossplane.io/claim-name: pgsql + crossplane.io/claim-namespace: unit-test + crossplane.io/composite: pgsql-gc9x4 + name: pgsql-gc9x4 + spec: + parameters: null + writeConnectionSecretToRef: {} + status: + instanceNamespace: my-psql resources: - name: namespace resource: From 85327244821610dcbae00a5f73153fe85f39041a Mon Sep 17 00:00:00 2001 From: Nicolas Bigler Date: Thu, 21 Sep 2023 09:44:49 +0200 Subject: [PATCH 2/3] Fix wrong GroupKind in redis Signed-off-by: Nicolas Bigler --- pkg/controller/webhooks/redis.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/pkg/controller/webhooks/redis.go b/pkg/controller/webhooks/redis.go index 5766085a13..e2e15c18fd 100644 --- a/pkg/controller/webhooks/redis.go +++ b/pkg/controller/webhooks/redis.go @@ -20,7 +20,7 @@ import ( var ( redisGK = schema.GroupKind{Group: "vshn.appcat.vshn.io", Kind: "VSHNRedis"} - redisGR = schema.GroupResource{Group: pgGK.Group, Resource: "vshnredis"} + redisGR = schema.GroupResource{Group: redisGK.Group, Resource: "vshnredis"} ) var _ webhook.CustomValidator = &RedisWebhookHandler{} @@ -48,13 +48,13 @@ func SetupRedisWebhookHandlerWithManager(mgr ctrl.Manager, withQuota bool) error // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type func (r *RedisWebhookHandler) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - pg, ok := obj.(*vshnv1.VSHNRedis) + redis, ok := obj.(*vshnv1.VSHNRedis) if !ok { return nil, 
fmt.Errorf("Provided manifest is not a valid VSHNRedis object") } if r.withQuota { - err := r.checkQuotas(ctx, pg, true) + err := r.checkQuotas(ctx, redis, true) if err != nil { return nil, err } @@ -95,6 +95,8 @@ func (r *RedisWebhookHandler) ValidateDelete(ctx context.Context, obj runtime.Ob func (r *RedisWebhookHandler) checkQuotas(ctx context.Context, redis *vshnv1.VSHNRedis, checkNamespaceQuota bool) *apierrors.StatusError { var fieldErr *field.Error + // TODO: Fix once we support replicas + instances := int64(1) allErrs := field.ErrorList{} resources := utils.Resources{} @@ -148,7 +150,7 @@ func (r *RedisWebhookHandler) checkQuotas(ctx context.Context, redis *vshnv1.VSH // But at the same time, if any of these fail we cannot do proper quota checks anymore. if len(allErrs) != 0 { return apierrors.NewInvalid( - pgGK, + redisGK, redis.GetName(), allErrs, ) @@ -163,9 +165,10 @@ func (r *RedisWebhookHandler) checkQuotas(ctx context.Context, redis *vshnv1.VSH redis.GetNamespace(), redis.Status.InstanceNamespace, resources, - pgGR, - pgGK, + redisGR, + redisGK, checkNamespaceQuota, + instances, ) return checker.CheckQuotas(ctx) From 65290ad358a9c0c85d845a21ff243d5e4b538e23 Mon Sep 17 00:00:00 2001 From: Nicolas Bigler Date: Wed, 27 Sep 2023 13:28:05 +0200 Subject: [PATCH 3/3] Add docs Signed-off-by: Nicolas Bigler --- docs/modules/ROOT/pages/explanations/webhooks.adoc | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/modules/ROOT/pages/explanations/webhooks.adoc b/docs/modules/ROOT/pages/explanations/webhooks.adoc index 2797ac89d3..09a476c766 100644 --- a/docs/modules/ROOT/pages/explanations/webhooks.adoc +++ b/docs/modules/ROOT/pages/explanations/webhooks.adoc @@ -5,6 +5,7 @@ APPUiO Cloud provides a quota system, which AppCat has to comply and integrate i It accomplishes that, by providing nice error messages, if any of the given quotas are breached. There are multiple quotas that need to be complied by this system: + * the amount of namespaces an organization can have * the amount of resources that can be used within a namespace @@ -16,8 +17,11 @@ If the requested AppCat instance would tip the amount of namespaces over this th Additionally, each namespace has quotas that need to comply. Via AppCat comp-functions, we set the quotas to a slightly higher value than the APPUiO Cloud default: -* 4.5 CPU limts/requests -* 16.5Gb Memory limits/requests +* 4.5 CPU limits/requests + limits/requests of all sidecars +* 16.5Gb Memory limits/requests + limits/requests of all sidecars + +The limits/requests of the sidecars are automatically calculated based on the defined limits/requests of all involved sidecars. +They are defined in the `sideCars` section of the service in the https://github.com/vshn/component-appcat/blob/master/class/defaults.yml[componet-appcat]. This should ensure that for each service at least one replica can be instantiated with the `*-8` plans. These values are written into annotations on the namespace.