From 271f583bfbc596886d8287e3738b01ec02e5cb82 Mon Sep 17 00:00:00 2001 From: Marcell Sevcsik <31651557+0sewa0@users.noreply.github.com> Date: Thu, 8 Aug 2024 09:18:43 +0200 Subject: [PATCH] Revert GORM changes (#3554) Co-authored-by: luhi-DT Co-authored-by: Lukas Hinterreiter <90035514+luhi-DT@users.noreply.github.com> --- .golangci.yml | 1 - cmd/csi/init/builder.go | 6 - go.mod | 4 +- go.sum | 5 + pkg/controllers/csi/config.go | 5 - pkg/controllers/csi/driver/server.go | 29 +- pkg/controllers/csi/driver/server_test.go | 55 -- .../csi/driver/volumes/app/publisher.go | 196 ++--- .../csi/driver/volumes/app/publisher_test.go | 218 ++--- .../csi/driver/volumes/bind_config.go | 50 ++ .../csi/driver/volumes/bind_config_test.go | 94 ++ .../csi/driver/volumes/host/publisher.go | 112 +-- .../csi/driver/volumes/host/publisher_test.go | 84 +- .../csi/driver/volumes/publisher.go | 6 +- .../csi/driver/volumes/volume_config.go | 2 +- .../csi/driver/volumes/volume_config_test.go | 7 +- pkg/controllers/csi/gc/binaries.go | 167 +++- pkg/controllers/csi/gc/binaries_test.go | 265 +++--- pkg/controllers/csi/gc/reconciler.go | 121 +-- pkg/controllers/csi/gc/reconciler_test.go | 33 +- pkg/controllers/csi/gc/unmounted.go | 1 - pkg/controllers/csi/gc/unmounted_test.go | 21 + pkg/controllers/csi/metadata/cleaner.go | 107 --- pkg/controllers/csi/metadata/cleaner_test.go | 328 ------- pkg/controllers/csi/metadata/correctness.go | 64 +- .../csi/metadata/correctness_test.go | 149 ++-- pkg/controllers/csi/metadata/fakes.go | 131 ++- pkg/controllers/csi/metadata/metadata.go | 143 +++ pkg/controllers/csi/metadata/metadata_test.go | 79 ++ pkg/controllers/csi/metadata/migrations.go | 192 ----- pkg/controllers/csi/metadata/models.go | 71 -- pkg/controllers/csi/metadata/path_resolver.go | 4 +- pkg/controllers/csi/metadata/sqlite.go | 814 ++++++++++++++++++ .../csi/metadata/sqlite_gorm_client.go | 470 ---------- .../csi/metadata/sqlite_gorm_client_test.go | 695 --------------- pkg/controllers/csi/metadata/sqlite_test.go | 645 ++++++++++++++ pkg/controllers/csi/provisioner/controller.go | 199 ++--- .../csi/provisioner/controller_test.go | 288 ++++--- pkg/controllers/csi/provisioner/install.go | 46 +- .../csi/provisioner/install_test.go | 32 +- .../codemodule/installer/image/installer.go | 7 +- .../installer/image/installer_test.go | 3 +- .../codemodule/installer/image/unpack.go | 8 +- pkg/util/testing/partial_equal.go | 31 - 44 files changed, 2913 insertions(+), 3075 deletions(-) create mode 100644 pkg/controllers/csi/driver/volumes/bind_config.go create mode 100644 pkg/controllers/csi/driver/volumes/bind_config_test.go delete mode 100644 pkg/controllers/csi/metadata/cleaner.go delete mode 100644 pkg/controllers/csi/metadata/cleaner_test.go create mode 100644 pkg/controllers/csi/metadata/metadata.go create mode 100644 pkg/controllers/csi/metadata/metadata_test.go delete mode 100644 pkg/controllers/csi/metadata/migrations.go delete mode 100644 pkg/controllers/csi/metadata/models.go create mode 100644 pkg/controllers/csi/metadata/sqlite.go delete mode 100644 pkg/controllers/csi/metadata/sqlite_gorm_client.go delete mode 100644 pkg/controllers/csi/metadata/sqlite_gorm_client_test.go create mode 100644 pkg/controllers/csi/metadata/sqlite_test.go delete mode 100644 pkg/util/testing/partial_equal.go diff --git a/.golangci.yml b/.golangci.yml index e6708818d0..3c66581390 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -137,7 +137,6 @@ linters-settings: - "github.com/docker/cli" - "github.com/go-gormigrate/gormigrate" - 
"github.com/google/uuid" - - "github.com/google/go-cmp/cmp" # Allowed packages in container-based builder. deny: diff --git a/cmd/csi/init/builder.go b/cmd/csi/init/builder.go index 75fb32fec3..934921a257 100644 --- a/cmd/csi/init/builder.go +++ b/cmd/csi/init/builder.go @@ -71,16 +71,10 @@ func (builder CommandBuilder) buildRun() func(*cobra.Command, []string) error { signalHandler := ctrl.SetupSignalHandler() - // new schema access, err := metadata.NewAccess(signalHandler, dtcsi.MetadataAccessPath) if err != nil { return err } - // new migrations - err = access.SchemaMigration() - if err != nil { - return err - } csiOptions := dtcsi.CSIOptions{ NodeId: nodeId, diff --git a/go.mod b/go.mod index f8259439fc..4c40bba9a1 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/google/go-containerregistry v0.19.2 github.com/google/uuid v1.6.0 github.com/klauspost/compress v1.17.9 + github.com/mattn/go-sqlite3 v1.14.22 github.com/opencontainers/go-digest v1.0.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 @@ -88,9 +89,9 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/sys/mountinfo v0.6.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -125,6 +126,7 @@ require ( k8s.io/component-base v0.30.3 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/mount-utils v0.30.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 7dc8fed4b0..1ee11a180b 100644 --- a/go.sum +++ b/go.sum @@ -116,6 +116,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -251,6 +253,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -321,6 +324,8 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/mount-utils v0.30.3 h1:8Z3wSW5+GSvGNtlDhtoZrBCKLMIf5z/9tf8pie+G06s= +k8s.io/mount-utils v0.30.3/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo= k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= diff --git a/pkg/controllers/csi/config.go b/pkg/controllers/csi/config.go index d2c798ff89..84ca19a8fa 100644 --- a/pkg/controllers/csi/config.go +++ b/pkg/controllers/csi/config.go @@ -18,7 +18,6 @@ package dtcsi import ( "path/filepath" - "time" ) const ( @@ -36,10 +35,6 @@ const ( DaemonSetName = "dynatrace-oneagent-csi-driver" UnixUmask = 0000 - - ShortRequeueDuration = 1 * time.Minute - DefaultRequeueDuration = 5 * time.Minute - LongRequeueDuration = 30 * time.Minute ) var MetadataAccessPath = filepath.Join(DataPath, "csi.db") diff --git a/pkg/controllers/csi/driver/server.go b/pkg/controllers/csi/driver/server.go index 29d57dfe8b..77bea9bad9 100644 --- a/pkg/controllers/csi/driver/server.go +++ b/pkg/controllers/csi/driver/server.go @@ -36,7 +36,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "k8s.io/utils/mount" + mount "k8s.io/mount-utils" ctrl "sigs.k8s.io/controller-runtime" ) @@ -145,7 +145,7 @@ func (svr *Server) NodePublishVolume(ctx context.Context, req *csi.NodePublishVo return nil, err } - if isMounted, err := isMounted(svr.mounter, volumeCfg.TargetPath); err != nil { + if isMounted, err := svr.mounter.IsMountPoint(volumeCfg.TargetPath); err != nil && !os.IsNotExist(err) { return nil, err } else if isMounted { return &csi.NodePublishVolumeResponse{}, nil @@ -166,25 +166,25 @@ func (svr *Server) NodePublishVolume(ctx context.Context, req *csi.NodePublishVo "mountflags", req.GetVolumeCapability().GetMount().GetMountFlags(), ) - return publisher.PublishVolume(ctx, *volumeCfg) + return publisher.PublishVolume(ctx, volumeCfg) } -func (svr *Server) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (response *csi.NodeUnpublishVolumeResponse, err error) { +func (svr *Server) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { volumeInfo, err := csivolumes.ParseNodeUnpublishVolumeRequest(req) if err != nil { return nil, err } for _, publisher := range svr.publishers { - canUnpublish, err := publisher.CanUnpublishVolume(ctx, *volumeInfo) + canUnpublish, err := publisher.CanUnpublishVolume(ctx, volumeInfo) if err != nil { log.Error(err, "couldn't determine if volume can be unpublished", "publisher", publisher) } if canUnpublish { - response, err := publisher.UnpublishVolume(ctx, *volumeInfo) + response, err := publisher.UnpublishVolume(ctx, volumeInfo) if err != nil { - 
log.Error(err, "couldn't unpublish volume properly", "publisher", publisher) + return nil, err } return response, nil @@ -197,10 +197,10 @@ func (svr *Server) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpubli } func (svr *Server) unmountUnknownVolume(volumeInfo csivolumes.VolumeInfo) { - log.Info("unmounting unknown volume", "volumeID", volumeInfo.VolumeID, "targetPath", volumeInfo.TargetPath) + log.Info("VolumeID not present in the database", "volumeID", volumeInfo.VolumeID, "targetPath", volumeInfo.TargetPath) if err := svr.mounter.Unmount(volumeInfo.TargetPath); err != nil { - log.Error(err, "failed to unmount unknown volume", "volumeID", volumeInfo.VolumeID) + log.Error(err, "Tried to unmount unknown volume", "volumeID", volumeInfo.VolumeID) } } @@ -228,17 +228,6 @@ func (svr *Server) NodeExpandVolume(context.Context, *csi.NodeExpandVolumeReques return nil, status.Error(codes.Unimplemented, "") } -func isMounted(mounter mount.Interface, targetPath string) (bool, error) { - isNotMounted, err := mount.IsNotMountPoint(mounter, targetPath) - if os.IsNotExist(err) { - isNotMounted = true - } else if err != nil { - return false, status.Error(codes.Internal, err.Error()) - } - - return !isNotMounted, nil -} - func logGRPC() grpc.UnaryServerInterceptor { return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { if info.FullMethod == "/csi.v1.Identity/Probe" || info.FullMethod == "/csi.v1.Node/NodeGetCapabilities" { diff --git a/pkg/controllers/csi/driver/server_test.go b/pkg/controllers/csi/driver/server_test.go index 7fdb6d4b2e..43de63cc2a 100644 --- a/pkg/controllers/csi/driver/server_test.go +++ b/pkg/controllers/csi/driver/server_test.go @@ -1,67 +1,12 @@ package csidriver import ( - "fmt" - "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/mount" ) -const ( - testTargetNotExist = "not-exists" - testTargetError = "error" - testTargetNotMounted = "not-mounted" - testTargetMounted = "mounted" - - testError = "test error message" -) - -type fakeMounter struct { - mount.FakeMounter -} - -func (*fakeMounter) IsLikelyNotMountPoint(target string) (bool, error) { - switch { - case target == testTargetNotExist: - return false, os.ErrNotExist - case target == testTargetError: - return false, fmt.Errorf(testError) - case target == testTargetMounted: - return true, nil - } - - return false, nil -} - -func TestCSIDriverServer_IsMounted(t *testing.T) { - t.Run(`mount point does not exist`, func(t *testing.T) { - mounted, err := isMounted(&fakeMounter{}, testTargetNotExist) - require.NoError(t, err) - assert.False(t, mounted) - }) - t.Run(`mounter throws error`, func(t *testing.T) { - mounted, err := isMounted(&fakeMounter{}, testTargetError) - - require.EqualError(t, err, "rpc error: code = Internal desc = test error message") - assert.False(t, mounted) - }) - t.Run(`mount point is not mounted`, func(t *testing.T) { - mounted, err := isMounted(&fakeMounter{}, testTargetNotMounted) - - require.NoError(t, err) - assert.True(t, mounted) - }) - t.Run(`mount point is mounted`, func(t *testing.T) { - mounted, err := isMounted(&fakeMounter{}, testTargetMounted) - - require.NoError(t, err) - assert.False(t, mounted) - }) -} - func TestCSIDriverServer_parseEndpoint(t *testing.T) { t.Run(`valid unix endpoint`, func(t *testing.T) { testEndpoint := "unix:///some/socket" diff --git a/pkg/controllers/csi/driver/volumes/app/publisher.go b/pkg/controllers/csi/driver/volumes/app/publisher.go index 
7e0aec3699..e4390bf262 100644 --- a/pkg/controllers/csi/driver/volumes/app/publisher.go +++ b/pkg/controllers/csi/driver/volumes/app/publisher.go @@ -21,21 +21,19 @@ import ( "fmt" "io" "os" - "path" "path/filepath" + "strings" dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" csivolumes "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/driver/volumes" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" - "github.com/Dynatrace/dynatrace-operator/pkg/injection/codemodule/processmoduleconfig" "github.com/container-storage-interface/spec/lib/go/csi" "github.com/pkg/errors" dto "github.com/prometheus/client_model/go" "github.com/spf13/afero" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "gorm.io/gorm" - "k8s.io/utils/mount" + mount "k8s.io/mount-utils" ) func NewAppVolumePublisher(fs afero.Afero, mounter mount.Interface, db metadata.Access, path metadata.PathResolver) csivolumes.Publisher { @@ -54,13 +52,13 @@ type AppVolumePublisher struct { path metadata.PathResolver } -func (publisher *AppVolumePublisher) PublishVolume(_ context.Context, volumeCfg csivolumes.VolumeConfig) (*csi.NodePublishVolumeResponse, error) { - tenantConfig, err := publisher.db.ReadTenantConfig(metadata.TenantConfig{Name: volumeCfg.DynakubeName}) +func (publisher *AppVolumePublisher) PublishVolume(ctx context.Context, volumeCfg *csivolumes.VolumeConfig) (*csi.NodePublishVolumeResponse, error) { + bindCfg, err := csivolumes.NewBindConfig(ctx, publisher.db, volumeCfg) if err != nil { - return nil, status.Error(codes.Internal, "failed to read tenant-config: "+err.Error()) + return nil, err } - hasTooManyAttempts, err := publisher.hasTooManyMountAttempts(tenantConfig, volumeCfg) + hasTooManyAttempts, err := publisher.hasTooManyMountAttempts(ctx, bindCfg, volumeCfg) if err != nil { return nil, err } @@ -71,53 +69,48 @@ func (publisher *AppVolumePublisher) PublishVolume(_ context.Context, volumeCfg return &csi.NodePublishVolumeResponse{}, nil } - if !IsArchiveAvailable(tenantConfig) { + if !bindCfg.IsArchiveAvailable() { return nil, status.Error( codes.Unavailable, - "version or digest is not yet set, csi-provisioner hasn't finished setup yet for tenant: "+tenantConfig.TenantUUID, + "version or digest is not yet set, csi-provisioner hasn't finished setup yet for tenant: "+bindCfg.TenantUUID, ) } - if err := publisher.ensureMountSteps(tenantConfig, volumeCfg); err != nil { + if err := publisher.ensureMountSteps(ctx, bindCfg, volumeCfg); err != nil { return nil, err } - agentsVersionsMetric.WithLabelValues(tenantConfig.DownloadedCodeModuleVersion).Inc() + agentsVersionsMetric.WithLabelValues(bindCfg.MetricVersionLabel()).Inc() return &csi.NodePublishVolumeResponse{}, nil } -func IsArchiveAvailable(tenantConfig *metadata.TenantConfig) bool { - return tenantConfig.DownloadedCodeModuleVersion != "" -} - -func (publisher *AppVolumePublisher) UnpublishVolume(_ context.Context, volumeInfo csivolumes.VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) { - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: volumeInfo.VolumeID}) - - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Info("failed to load AppMount", "error", err.Error()) +func (publisher *AppVolumePublisher) UnpublishVolume(ctx context.Context, volumeInfo *csivolumes.VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) { + volume, err := publisher.loadVolume(ctx, volumeInfo.VolumeID) + if err != nil { + log.Info("failed to load volume info", "error", err.Error()) } 
- if appMount == nil { + if volume == nil { return &csi.NodeUnpublishVolumeResponse{}, nil } - log.Info("loaded AppMount info", "id", appMount.VolumeMetaID, "pod name", appMount.VolumeMeta.PodName, "version", appMount.CodeModuleVersion) + log.Info("loaded volume info", "id", volume.VolumeID, "pod name", volume.PodName, "version", volume.Version, "dynakube", volume.TenantUUID) - if appMount.CodeModuleVersion == "" { - log.Info("requester has a dummy AppMount, no node-level unmount is needed") + if volume.Version == "" { + log.Info("requester has a dummy volume, no node-level unmount is needed") - return &csi.NodeUnpublishVolumeResponse{}, publisher.db.DeleteAppMount(&metadata.AppMount{VolumeMetaID: appMount.VolumeMetaID}) + return &csi.NodeUnpublishVolumeResponse{}, publisher.db.DeleteVolume(ctx, volume.VolumeID) } - overlayFSPath := filepath.Join(appMount.Location, dtcsi.OverlayMappedDirPath) - publisher.unmountOneAgent(volumeInfo.TargetPath, overlayFSPath) + overlayFSPath := publisher.path.AgentRunDirForVolume(volume.TenantUUID, volumeInfo.VolumeID) + publisher.umountOneAgent(volumeInfo.TargetPath, overlayFSPath) - if err = publisher.db.DeleteAppMount(&metadata.AppMount{VolumeMetaID: appMount.VolumeMetaID}); err != nil { + if err = publisher.db.DeleteVolume(ctx, volume.VolumeID); err != nil { return nil, status.Error(codes.Internal, err.Error()) } - log.Info("deleted AppMount", "ID", appMount.VolumeMetaID, "PodUID", appMount.VolumeMeta.PodName, "Version", appMount.CodeModuleVersion) + log.Info("deleted volume info", "ID", volume.VolumeID, "PodUID", volume.PodName, "Version", volume.Version, "TenantUUID", volume.TenantUUID) if err = publisher.fs.RemoveAll(volumeInfo.TargetPath); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -125,51 +118,66 @@ func (publisher *AppVolumePublisher) UnpublishVolume(_ context.Context, volumeIn log.Info("volume has been unpublished", "targetPath", volumeInfo.TargetPath) - publisher.fireVolumeUnpublishedMetric(appMount.CodeModuleVersion) + publisher.fireVolumeUnpublishedMetric(*volume) return &csi.NodeUnpublishVolumeResponse{}, nil } -func (publisher *AppVolumePublisher) CanUnpublishVolume(_ context.Context, volumeInfo csivolumes.VolumeInfo) (bool, error) { - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: volumeInfo.VolumeID}) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return false, err +func (publisher *AppVolumePublisher) CanUnpublishVolume(ctx context.Context, volumeInfo *csivolumes.VolumeInfo) (bool, error) { + volume, err := publisher.loadVolume(ctx, volumeInfo.VolumeID) + if err != nil { + return false, status.Error(codes.Internal, "failed to get volume info from database: "+err.Error()) } - return appMount != nil, nil + return volume != nil, nil } -func (publisher *AppVolumePublisher) fireVolumeUnpublishedMetric(volumeVersion string) { - if len(volumeVersion) > 0 { - agentsVersionsMetric.WithLabelValues(volumeVersion).Dec() +func (publisher *AppVolumePublisher) fireVolumeUnpublishedMetric(volume metadata.Volume) { + if len(volume.Version) > 0 { + agentsVersionsMetric.WithLabelValues(volume.Version).Dec() var m = &dto.Metric{} - if err := agentsVersionsMetric.WithLabelValues(volumeVersion).Write(m); err != nil { + if err := agentsVersionsMetric.WithLabelValues(volume.Version).Write(m); err != nil { log.Error(err, "failed to get the value of agent version metric") } if m.GetGauge().GetValue() <= float64(0) { - agentsVersionsMetric.DeleteLabelValues(volumeVersion) + 
agentsVersionsMetric.DeleteLabelValues(volume.Version) } } } -func (publisher *AppVolumePublisher) prepareUpperDir(tenantConfig *metadata.TenantConfig, volumeCfg csivolumes.VolumeConfig) (string, error) { - upperDir := publisher.path.OverlayVarDir(tenantConfig.TenantUUID, volumeCfg.VolumeID) +func (publisher *AppVolumePublisher) buildLowerDir(bindCfg *csivolumes.BindConfig) string { + var binFolderName string + if bindCfg.ImageDigest == "" { + binFolderName = bindCfg.Version + } else { + binFolderName = bindCfg.ImageDigest + } + + directories := []string{ + publisher.path.AgentSharedBinaryDirForAgent(binFolderName), + } + + return strings.Join(directories, ":") +} + +func (publisher *AppVolumePublisher) prepareUpperDir(bindCfg *csivolumes.BindConfig, volumeCfg *csivolumes.VolumeConfig) (string, error) { + upperDir := publisher.path.OverlayVarDir(bindCfg.TenantUUID, volumeCfg.VolumeID) err := publisher.fs.MkdirAll(upperDir, os.ModePerm) if err != nil { return "", errors.WithMessagef(err, "failed create overlay upper directory structure, path: %s", upperDir) } - destAgentConfPath := publisher.path.OverlayVarRuxitAgentProcConf(tenantConfig.TenantUUID, volumeCfg.VolumeID) + destAgentConfPath := publisher.path.OverlayVarRuxitAgentProcConf(bindCfg.TenantUUID, volumeCfg.VolumeID) err = publisher.fs.MkdirAll(filepath.Dir(destAgentConfPath), os.ModePerm) if err != nil { return "", errors.WithMessagef(err, "failed create overlay upper directory agent config directory structure, path: %s", upperDir) } - srcAgentConfPath := path.Join(tenantConfig.ConfigDirPath, processmoduleconfig.RuxitAgentProcPath) + srcAgentConfPath := publisher.path.AgentSharedRuxitAgentProcConf(bindCfg.TenantUUID, volumeCfg.DynakubeName) srcFile, err := publisher.fs.Open(srcAgentConfPath) if err != nil { @@ -198,25 +206,20 @@ func (publisher *AppVolumePublisher) prepareUpperDir(tenantConfig *metadata.Tena return upperDir, nil } -func (publisher *AppVolumePublisher) mountOneAgent(tenantConfig *metadata.TenantConfig, volumeCfg csivolumes.VolumeConfig) error { - mappedDir := publisher.path.OverlayMappedDir(tenantConfig.TenantUUID, volumeCfg.VolumeID) +func (publisher *AppVolumePublisher) mountOneAgent(bindCfg *csivolumes.BindConfig, volumeCfg *csivolumes.VolumeConfig) error { + mappedDir := publisher.path.OverlayMappedDir(bindCfg.TenantUUID, volumeCfg.VolumeID) _ = publisher.fs.MkdirAll(mappedDir, os.ModePerm) - codeModule, err := publisher.db.ReadCodeModule(metadata.CodeModule{Version: tenantConfig.DownloadedCodeModuleVersion}) - if err != nil { - return err - } - - upperDir, err := publisher.prepareUpperDir(tenantConfig, volumeCfg) + upperDir, err := publisher.prepareUpperDir(bindCfg, volumeCfg) if err != nil { return err } - workDir := publisher.path.OverlayWorkDir(tenantConfig.TenantUUID, volumeCfg.VolumeID) + workDir := publisher.path.OverlayWorkDir(bindCfg.TenantUUID, volumeCfg.VolumeID) _ = publisher.fs.MkdirAll(workDir, os.ModePerm) overlayOptions := []string{ - "lowerdir=" + codeModule.Location, + "lowerdir=" + publisher.buildLowerDir(bindCfg), "upperdir=" + upperDir, "workdir=" + workDir, } @@ -238,28 +241,27 @@ func (publisher *AppVolumePublisher) mountOneAgent(tenantConfig *metadata.Tenant return nil } -func (publisher *AppVolumePublisher) unmountOneAgent(targetPath string, overlayFSPath string) { +func (publisher *AppVolumePublisher) umountOneAgent(targetPath string, overlayFSPath string) { if err := publisher.mounter.Unmount(targetPath); err != nil { log.Error(err, "Unmount failed", "path", targetPath) } if 
filepath.IsAbs(overlayFSPath) { - if err := publisher.mounter.Unmount(overlayFSPath); err != nil { - log.Error(err, "Unmount failed", "path", overlayFSPath) + agentDirectoryForPod := filepath.Join(overlayFSPath, dtcsi.OverlayMappedDirPath) + if err := publisher.mounter.Unmount(agentDirectoryForPod); err != nil { + log.Error(err, "Unmount failed", "path", agentDirectoryForPod) } } } -func (publisher *AppVolumePublisher) ensureMountSteps(tenantConfig *metadata.TenantConfig, volumeCfg csivolumes.VolumeConfig) error { - if err := publisher.mountOneAgent(tenantConfig, volumeCfg); err != nil { +func (publisher *AppVolumePublisher) ensureMountSteps(ctx context.Context, bindCfg *csivolumes.BindConfig, volumeCfg *csivolumes.VolumeConfig) error { + if err := publisher.mountOneAgent(bindCfg, volumeCfg); err != nil { return status.Error(codes.Internal, fmt.Sprintf("failed to mount oneagent volume: %s", err)) } - if err := publisher.storeVolume(tenantConfig, volumeCfg); err != nil { - agentRunDirForVolume := publisher.path.AgentRunDirForVolume(tenantConfig.TenantUUID, volumeCfg.VolumeID) - overlayFSPath := filepath.Join(agentRunDirForVolume, dtcsi.OverlayMappedDirPath) - - publisher.unmountOneAgent(volumeCfg.TargetPath, overlayFSPath) + if err := publisher.storeVolume(ctx, bindCfg, volumeCfg); err != nil { + overlayFSPath := publisher.path.AgentRunDirForVolume(bindCfg.TenantUUID, volumeCfg.VolumeID) + publisher.umountOneAgent(volumeCfg.TargetPath, overlayFSPath) return status.Error(codes.Internal, fmt.Sprintf("Failed to store volume info: %s", err)) } @@ -267,62 +269,46 @@ func (publisher *AppVolumePublisher) ensureMountSteps(tenantConfig *metadata.Ten return nil } -func (publisher *AppVolumePublisher) hasTooManyMountAttempts(tenantConfig *metadata.TenantConfig, volumeCfg csivolumes.VolumeConfig) (bool, error) { - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: volumeCfg.VolumeID}) - if err != nil && errors.Is(err, gorm.ErrRecordNotFound) { - appMount, err = publisher.newAppMount(tenantConfig, volumeCfg) - if err != nil { - return false, err - } - - publisher.db.CreateAppMount(appMount) - } else if err != nil { +func (publisher *AppVolumePublisher) hasTooManyMountAttempts(ctx context.Context, bindCfg *csivolumes.BindConfig, volumeCfg *csivolumes.VolumeConfig) (bool, error) { + volume, err := publisher.loadVolume(ctx, volumeCfg.VolumeID) + if err != nil { return false, err } - if appMount.MountAttempts > tenantConfig.MaxFailedMountAttempts { + if volume == nil { + volume = createNewVolume(bindCfg, volumeCfg) + } + + if volume.MountAttempts > bindCfg.MaxMountAttempts { return true, nil } - appMount.MountAttempts += 1 + volume.MountAttempts += 1 - return false, publisher.db.UpdateAppMount(appMount) + return false, publisher.db.InsertVolume(ctx, volume) } -func (publisher *AppVolumePublisher) storeVolume(tenantConfig *metadata.TenantConfig, volumeCfg csivolumes.VolumeConfig) error { - newAppMount, err := publisher.newAppMount(tenantConfig, volumeCfg) - if err != nil { - return err - } +func (publisher *AppVolumePublisher) storeVolume(ctx context.Context, bindCfg *csivolumes.BindConfig, volumeCfg *csivolumes.VolumeConfig) error { + volume := createNewVolume(bindCfg, volumeCfg) + log.Info("inserting volume info", "ID", volume.VolumeID, "PodUID", volume.PodName, "Version", volume.Version, "TenantUUID", volume.TenantUUID) - log.Info("inserting AppMount", "appMount", newAppMount) - - // check if it currently exists - appMount, err := 
publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: newAppMount.VolumeMetaID}) - - if appMount == nil && errors.Is(err, gorm.ErrRecordNotFound) { - return publisher.db.CreateAppMount(newAppMount) - } + return publisher.db.InsertVolume(ctx, volume) +} +func (publisher *AppVolumePublisher) loadVolume(ctx context.Context, volumeID string) (*metadata.Volume, error) { + volume, err := publisher.db.GetVolume(ctx, volumeID) if err != nil { - return err + return nil, err } - return publisher.db.UpdateAppMount(newAppMount) + return volume, nil } -func (publisher *AppVolumePublisher) newAppMount(tenantConfig *metadata.TenantConfig, volumeCfg csivolumes.VolumeConfig) (*metadata.AppMount, error) { - codeModule, err := publisher.db.ReadCodeModule(metadata.CodeModule{Version: tenantConfig.DownloadedCodeModuleVersion}) - if err != nil { - return nil, err +func createNewVolume(bindCfg *csivolumes.BindConfig, volumeCfg *csivolumes.VolumeConfig) *metadata.Volume { + version := bindCfg.Version + if bindCfg.ImageDigest != "" { + version = bindCfg.ImageDigest } - return &metadata.AppMount{ - VolumeMeta: metadata.VolumeMeta{ID: volumeCfg.VolumeID, PodName: volumeCfg.PodName}, - CodeModule: *codeModule, - VolumeMetaID: volumeCfg.VolumeID, - CodeModuleVersion: codeModule.Version, - Location: publisher.path.AgentRunDirForVolume(tenantConfig.TenantUUID, volumeCfg.VolumeID), - MountAttempts: 0, - }, nil + return metadata.NewVolume(volumeCfg.VolumeID, volumeCfg.PodName, version, bindCfg.TenantUUID, 0) } diff --git a/pkg/controllers/csi/driver/volumes/app/publisher_test.go b/pkg/controllers/csi/driver/volumes/app/publisher_test.go index 226f9b87bd..f62f51a273 100644 --- a/pkg/controllers/csi/driver/volumes/app/publisher_test.go +++ b/pkg/controllers/csi/driver/volumes/app/publisher_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" + dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" csivolumes "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/driver/volumes" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" @@ -13,26 +13,24 @@ import ( "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gorm.io/gorm" - "k8s.io/utils/mount" + mount "k8s.io/mount-utils" ) const ( - testPodUID = "a-pod" - testVolumeId = "a-volume" - testTargetPath = "/path/to/container/filesystem/opt/dynatrace/oneagent-paas" - testTenantUUID = "a-tenant-uuid" - testAgentVersion = "1.2-3" - testCodeModuleLocation = "/codemodules/" + testAgentVersion - testDynakubeName = "a-dynakube" - testImageDigest = "sha256:123456789" + testPodUID = "a-pod" + testVolumeId = "a-volume" + testTargetPath = "/path/to/container/filesystem/opt/dynatrace/oneagent-paas" + testTenantUUID = "a-tenant-uuid" + testAgentVersion = "1.2-3" + testDynakubeName = "a-dynakube" + testImageDigest = "sha256:123456789" ) func TestPublishVolume(t *testing.T) { t.Run("using url", func(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) - mockDynakubeMetadata(t, &publisher) + mockUrlDynakubeMetadata(t, &publisher) mockSharedRuxitAgentProcConf(t, &publisher) response, err := publisher.PublishVolume(context.Background(), createTestVolumeConfig()) @@ -44,7 +42,7 @@ func TestPublishVolume(t *testing.T) { assert.Equal(t, "overlay", mounter.MountPoints[0].Device) assert.Equal(t, 
"overlay", mounter.MountPoints[0].Type) assert.Equal(t, []string{ - "lowerdir=" + testCodeModuleLocation, + "lowerdir=/codemodules/1.2-3", "upperdir=/a-tenant-uuid/run/a-volume/var", "workdir=/a-tenant-uuid/run/a-volume/work"}, mounter.MountPoints[0].Opts) @@ -62,6 +60,39 @@ func TestPublishVolume(t *testing.T) { assertReferencesForPublishedVolume(t, &publisher, mounter) }) + t.Run("using code modules image", func(t *testing.T) { + mounter := mount.NewFakeMounter([]mount.MountPoint{}) + publisher := newPublisherForTesting(mounter) + mockImageDynakubeMetadata(t, &publisher) + mockSharedRuxitAgentProcConf(t, &publisher) + + response, err := publisher.PublishVolume(context.Background(), createTestVolumeConfig()) + require.NoError(t, err) + assert.NotNil(t, response) + + require.NotEmpty(t, mounter.MountPoints) + + assert.Equal(t, "overlay", mounter.MountPoints[0].Device) + assert.Equal(t, "overlay", mounter.MountPoints[0].Type) + assert.Equal(t, []string{ + "lowerdir=/codemodules/" + testImageDigest, + "upperdir=/a-tenant-uuid/run/a-volume/var", + "workdir=/a-tenant-uuid/run/a-volume/work"}, + mounter.MountPoints[0].Opts) + assert.Equal(t, "/a-tenant-uuid/run/a-volume/mapped", mounter.MountPoints[0].Path) + + assert.Equal(t, "overlay", mounter.MountPoints[1].Device) + assert.Equal(t, "", mounter.MountPoints[1].Type) + assert.Equal(t, []string{"bind"}, mounter.MountPoints[1].Opts) + assert.Equal(t, testTargetPath, mounter.MountPoints[1].Path) + + confCopied, err := publisher.fs.Exists(publisher.path.OverlayVarRuxitAgentProcConf(testTenantUUID, testVolumeId)) + require.NoError(t, err) + assert.True(t, confCopied) + + assertReferencesForPublishedVolumeWithCodeModulesImage(t, &publisher, mounter) + }) + t.Run("too many mount attempts", func(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) @@ -77,18 +108,17 @@ func TestPublishVolume(t *testing.T) { func TestPrepareUpperDir(t *testing.T) { testFileContent := []byte{'t', 'e', 's', 't'} + testBindConfig := &csivolumes.BindConfig{ + TenantUUID: testTenantUUID, + } t.Run("happy path -> file copied from shared dir to overlay dir", func(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) mockSharedRuxitAgentProcConf(t, &publisher, testFileContent...) 
- testTenantConfig := &metadata.TenantConfig{ - TenantUUID: testTenantUUID, - ConfigDirPath: publisher.path.AgentConfigDir(testTenantUUID, testDynakubeName), - } - upperDir, err := publisher.prepareUpperDir(testTenantConfig, createTestVolumeConfig()) + upperDir, err := publisher.prepareUpperDir(testBindConfig, createTestVolumeConfig()) require.NoError(t, err) require.NotEmpty(t, upperDir) assertUpperDirContent(t, &publisher, testFileContent) @@ -98,12 +128,8 @@ func TestPrepareUpperDir(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) - testTenantConfig := &metadata.TenantConfig{ - TenantUUID: testTenantUUID, - ConfigDirPath: "beep-boop", - } - upperDir, err := publisher.prepareUpperDir(testTenantConfig, createTestVolumeConfig()) + upperDir, err := publisher.prepareUpperDir(testBindConfig, createTestVolumeConfig()) require.Error(t, err) require.Empty(t, upperDir) @@ -122,35 +148,31 @@ func assertUpperDirContent(t *testing.T, publisher *AppVolumePublisher, expected func TestHasTooManyMountAttempts(t *testing.T) { t.Run(`initial try`, func(t *testing.T) { publisher := newPublisherForTesting(nil) - mockDynakubeMetadata(t, &publisher) - - tenantConfig := &metadata.TenantConfig{ - DownloadedCodeModuleVersion: testAgentVersion, - TenantUUID: testTenantUUID, - MaxFailedMountAttempts: dynakube.DefaultMaxFailedCsiMountAttempts, + bindCfg := &csivolumes.BindConfig{ + TenantUUID: testTenantUUID, + MaxMountAttempts: dynatracev1beta2.DefaultMaxFailedCsiMountAttempts, } volumeCfg := createTestVolumeConfig() - hasTooManyAttempts, err := publisher.hasTooManyMountAttempts(tenantConfig, volumeCfg) + hasTooManyAttempts, err := publisher.hasTooManyMountAttempts(context.Background(), bindCfg, volumeCfg) require.NoError(t, err) assert.False(t, hasTooManyAttempts) - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: volumeCfg.VolumeID}) + volume, err := publisher.db.GetVolume(context.Background(), volumeCfg.VolumeID) require.NoError(t, err) - require.NotNil(t, appMount) - assert.Equal(t, int64(1), appMount.MountAttempts) + require.NotNil(t, volume) + assert.Equal(t, 1, volume.MountAttempts) }) - t.Run(`too many mount attempts`, func(t *testing.T) { publisher := newPublisherForTesting(nil) mockFailedPublishedVolume(t, &publisher) - tenantConfig := &metadata.TenantConfig{ - MaxFailedMountAttempts: dynakube.DefaultMaxFailedCsiMountAttempts, + bindCfg := &csivolumes.BindConfig{ + MaxMountAttempts: dynatracev1beta2.DefaultMaxFailedCsiMountAttempts, } - hasTooManyAttempts, err := publisher.hasTooManyMountAttempts(tenantConfig, createTestVolumeConfig()) + hasTooManyAttempts, err := publisher.hasTooManyMountAttempts(context.Background(), bindCfg, createTestVolumeConfig()) require.NoError(t, err) assert.True(t, hasTooManyAttempts) @@ -219,7 +241,7 @@ func TestNodePublishAndUnpublishVolume(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) - mockDynakubeMetadata(t, &publisher) + mockUrlDynakubeMetadata(t, &publisher) mockSharedRuxitAgentProcConf(t, &publisher) publishResponse, err := publisher.PublishVolume(context.Background(), createTestVolumeConfig()) @@ -247,32 +269,32 @@ func TestNodePublishAndUnpublishVolume(t *testing.T) { func TestStoreAndLoadPodInfo(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) - mockDynakubeMetadata(t, &publisher) - tenantConfig := &metadata.TenantConfig{ - 
DownloadedCodeModuleVersion: testAgentVersion, - TenantUUID: testTenantUUID, + bindCfg := &csivolumes.BindConfig{ + Version: testAgentVersion, + TenantUUID: testTenantUUID, } volumeCfg := createTestVolumeConfig() - err := publisher.storeVolume(tenantConfig, volumeCfg) + err := publisher.storeVolume(context.Background(), bindCfg, volumeCfg) require.NoError(t, err) - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: volumeCfg.VolumeID}) + volume, err := publisher.loadVolume(context.Background(), volumeCfg.VolumeID) require.NoError(t, err) - require.NotNil(t, appMount) - assert.Equal(t, testVolumeId, appMount.VolumeMetaID) - assert.Equal(t, testPodUID, appMount.VolumeMeta.PodName) - assert.Equal(t, testAgentVersion, appMount.CodeModuleVersion) + require.NotNil(t, volume) + assert.Equal(t, testVolumeId, volume.VolumeID) + assert.Equal(t, testPodUID, volume.PodName) + assert.Equal(t, testAgentVersion, volume.Version) + assert.Equal(t, testTenantUUID, volume.TenantUUID) } func TestLoadPodInfo_Empty(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: testVolumeId}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - require.Nil(t, appMount) + volume, err := publisher.loadVolume(context.Background(), testVolumeId) + require.NoError(t, err) + require.Nil(t, volume) } func TestMountIfDBHasError(t *testing.T) { @@ -280,12 +302,12 @@ func TestMountIfDBHasError(t *testing.T) { publisher := newPublisherForTesting(mounter) publisher.db = &metadata.FakeFailDB{} - tenantConfig := &metadata.TenantConfig{ - TenantUUID: testTenantUUID, - MaxFailedMountAttempts: dynakube.DefaultMaxFailedCsiMountAttempts, + bindCfg := &csivolumes.BindConfig{ + TenantUUID: testTenantUUID, + MaxMountAttempts: dynatracev1beta2.DefaultMaxFailedCsiMountAttempts, } - err := publisher.ensureMountSteps(tenantConfig, createTestVolumeConfig()) + err := publisher.ensureMountSteps(context.Background(), bindCfg, createTestVolumeConfig()) require.Error(t, err) require.Empty(t, mounter.MountPoints) } @@ -304,53 +326,20 @@ func newPublisherForTesting(mounter *mount.FakeMounter) AppVolumePublisher { } func mockPublishedVolume(t *testing.T, publisher *AppVolumePublisher) { - mockDynakubeMetadata(t, publisher) - - mockAppMount := metadata.AppMount{ - VolumeMeta: metadata.VolumeMeta{ID: testVolumeId, PodUid: testPodUID}, - CodeModule: metadata.CodeModule{Version: testAgentVersion}, - VolumeMetaID: testVolumeId, - CodeModuleVersion: testAgentVersion, - MountAttempts: 0, - Location: publisher.path.AgentRunDirForVolume(testTenantUUID, testVolumeId), - } - - err := publisher.db.CreateAppMount(&mockAppMount) + mockUrlDynakubeMetadata(t, publisher) + err := publisher.db.InsertVolume(context.Background(), metadata.NewVolume(testVolumeId, testPodUID, testAgentVersion, testTenantUUID, 0)) require.NoError(t, err) agentsVersionsMetric.WithLabelValues(testAgentVersion).Inc() } func mockFailedPublishedVolume(t *testing.T, publisher *AppVolumePublisher) { - mockDynakubeMetadata(t, publisher) - - appMount := &metadata.AppMount{ - VolumeMeta: metadata.VolumeMeta{ID: testVolumeId, PodUid: testPodUID}, - CodeModuleVersion: testAgentVersion, - MountAttempts: dynakube.DefaultMaxFailedCsiMountAttempts + 1, - VolumeMetaID: testVolumeId, - } - - err := publisher.db.CreateAppMount(appMount) + mockUrlDynakubeMetadata(t, publisher) + err := publisher.db.InsertVolume(context.Background(), 
metadata.NewVolume(testVolumeId, testPodUID, testAgentVersion, testTenantUUID, dynatracev1beta2.DefaultMaxFailedCsiMountAttempts+1)) require.NoError(t, err) } -func mockDynakubeMetadata(t *testing.T, publisher *AppVolumePublisher) { - tenantConfig := metadata.TenantConfig{ - Name: testDynakubeName, - TenantUUID: testTenantUUID, - DownloadedCodeModuleVersion: testAgentVersion, - ConfigDirPath: publisher.path.AgentConfigDir(testTenantUUID, testDynakubeName), - MaxFailedMountAttempts: 0, - } - err := publisher.db.CreateTenantConfig(&tenantConfig) - require.NoError(t, err) - - codeModule := metadata.CodeModule{ - Version: testAgentVersion, - Location: testCodeModuleLocation, - } - - err = publisher.db.CreateCodeModule(&codeModule) +func mockUrlDynakubeMetadata(t *testing.T, publisher *AppVolumePublisher) { + err := publisher.db.InsertDynakube(context.Background(), metadata.NewDynakube(testDynakubeName, testTenantUUID, testAgentVersion, "", 0)) require.NoError(t, err) } @@ -365,20 +354,37 @@ func mockSharedRuxitAgentProcConf(t *testing.T, publisher *AppVolumePublisher, c } } +func mockImageDynakubeMetadata(t *testing.T, publisher *AppVolumePublisher) { + err := publisher.db.InsertDynakube(context.Background(), metadata.NewDynakube(testDynakubeName, testTenantUUID, "", testImageDigest, dynatracev1beta2.DefaultMaxFailedCsiMountAttempts)) + require.NoError(t, err) +} + func assertReferencesForPublishedVolume(t *testing.T, publisher *AppVolumePublisher, mounter *mount.FakeMounter) { assert.NotEmpty(t, mounter.MountPoints) - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: testVolumeId}) + volume, err := publisher.loadVolume(context.Background(), testVolumeId) require.NoError(t, err) - assert.Equal(t, testVolumeId, appMount.VolumeMetaID) - assert.Equal(t, testPodUID, appMount.VolumeMeta.PodName) - assert.Equal(t, testAgentVersion, appMount.CodeModuleVersion) + assert.Equal(t, testVolumeId, volume.VolumeID) + assert.Equal(t, testPodUID, volume.PodName) + assert.Equal(t, testAgentVersion, volume.Version) + assert.Equal(t, testTenantUUID, volume.TenantUUID) +} + +func assertReferencesForPublishedVolumeWithCodeModulesImage(t *testing.T, publisher *AppVolumePublisher, mounter *mount.FakeMounter) { + assert.NotEmpty(t, mounter.MountPoints) + + volume, err := publisher.loadVolume(context.Background(), testVolumeId) + require.NoError(t, err) + assert.Equal(t, testVolumeId, volume.VolumeID) + assert.Equal(t, testPodUID, volume.PodName) + assert.Equal(t, testImageDigest, volume.Version) + assert.Equal(t, testTenantUUID, volume.TenantUUID) } func assertNoReferencesForUnpublishedVolume(t *testing.T, publisher *AppVolumePublisher) { - appMount, err := publisher.db.ReadAppMount(metadata.AppMount{VolumeMetaID: testVolumeId}) - require.Error(t, err) - require.Nil(t, appMount) + volume, err := publisher.loadVolume(context.Background(), testVolumeId) + require.NoError(t, err) + require.Nil(t, volume) } func resetMetrics() { @@ -386,17 +392,17 @@ func resetMetrics() { agentsVersionsMetric.DeleteLabelValues(testImageDigest) } -func createTestVolumeConfig() csivolumes.VolumeConfig { - return csivolumes.VolumeConfig{ - VolumeInfo: createTestVolumeInfo(), +func createTestVolumeConfig() *csivolumes.VolumeConfig { + return &csivolumes.VolumeConfig{ + VolumeInfo: *createTestVolumeInfo(), PodName: testPodUID, Mode: Mode, DynakubeName: testDynakubeName, } } -func createTestVolumeInfo() csivolumes.VolumeInfo { - return csivolumes.VolumeInfo{ +func createTestVolumeInfo() *csivolumes.VolumeInfo { + 
return &csivolumes.VolumeInfo{ VolumeID: testVolumeId, TargetPath: testTargetPath, } diff --git a/pkg/controllers/csi/driver/volumes/bind_config.go b/pkg/controllers/csi/driver/volumes/bind_config.go new file mode 100644 index 0000000000..50762eb65c --- /dev/null +++ b/pkg/controllers/csi/driver/volumes/bind_config.go @@ -0,0 +1,50 @@ +package csivolumes + +import ( + "context" + "fmt" + + "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type BindConfig struct { + TenantUUID string + Version string + ImageDigest string + DynakubeName string + MaxMountAttempts int +} + +func NewBindConfig(ctx context.Context, access metadata.Access, volumeCfg *VolumeConfig) (*BindConfig, error) { + dynakube, err := access.GetDynakube(ctx, volumeCfg.DynakubeName) + if err != nil { + return nil, status.Error(codes.Unavailable, fmt.Sprintf("failed to extract tenant for DynaKube %s: %s", volumeCfg.DynakubeName, err.Error())) + } + + if dynakube == nil { + return nil, status.Error(codes.Unavailable, fmt.Sprintf("dynakube (%s) is missing from metadata database", volumeCfg.DynakubeName)) + } + + return &BindConfig{ + TenantUUID: dynakube.TenantUUID, + Version: dynakube.LatestVersion, + ImageDigest: dynakube.ImageDigest, + DynakubeName: dynakube.Name, + MaxMountAttempts: dynakube.MaxFailedMountAttempts, + }, nil +} + +func (cfg BindConfig) IsArchiveAvailable() bool { + return cfg.Version != "" || cfg.ImageDigest != "" +} + +func (cfg BindConfig) MetricVersionLabel() string { + versionLabel := cfg.Version + if versionLabel == "" { + versionLabel = cfg.ImageDigest + } + + return versionLabel +} diff --git a/pkg/controllers/csi/driver/volumes/bind_config_test.go b/pkg/controllers/csi/driver/volumes/bind_config_test.go new file mode 100644 index 0000000000..8debddb242 --- /dev/null +++ b/pkg/controllers/csi/driver/volumes/bind_config_test.go @@ -0,0 +1,94 @@ +package csivolumes + +import ( + "context" + "testing" + + "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testDynakubeName = "a-dynakube" + testTenantUUID = "a-tenant-uuid" + testAgentVersion = "1.2-3" +) + +func TestNewBindConfig(t *testing.T) { + t.Run(`no dynakube in storage`, func(t *testing.T) { + volumeCfg := &VolumeConfig{ + DynakubeName: testDynakubeName, + } + + bindCfg, err := NewBindConfig(context.TODO(), metadata.FakeMemoryDB(), volumeCfg) + + require.Error(t, err) + assert.Nil(t, bindCfg) + }) + t.Run(`create correct bind config`, func(t *testing.T) { + volumeCfg := &VolumeConfig{ + DynakubeName: testDynakubeName, + } + + db := metadata.FakeMemoryDB() + + db.InsertDynakube(context.TODO(), metadata.NewDynakube(testDynakubeName, testTenantUUID, testAgentVersion, "", 0)) + + bindCfg, err := NewBindConfig(context.TODO(), db, volumeCfg) + + expected := BindConfig{ + TenantUUID: testTenantUUID, + Version: testAgentVersion, + DynakubeName: testDynakubeName, + } + + require.NoError(t, err) + assert.NotNil(t, bindCfg) + assert.Equal(t, expected, *bindCfg) + }) +} + +func TestIsArchiveAvailable(t *testing.T) { + t.Run(`no version, no digest`, func(t *testing.T) { + bindCfg := BindConfig{} + + assert.False(t, bindCfg.IsArchiveAvailable()) + }) + t.Run(`version set, no digest`, func(t *testing.T) { + bindCfg := BindConfig{ + Version: "1.2.3", + } + + assert.True(t, bindCfg.IsArchiveAvailable()) + }) + t.Run(`no version, digest set`, func(t *testing.T) { 
+ bindCfg := BindConfig{ + ImageDigest: "sha256:123", + } + + assert.True(t, bindCfg.IsArchiveAvailable()) + }) +} + +func TestMetricVersionLabel(t *testing.T) { + t.Run(`no version, no digest`, func(t *testing.T) { + bindCfg := BindConfig{} + + assert.Empty(t, bindCfg.MetricVersionLabel()) + }) + t.Run(`version set, no digest`, func(t *testing.T) { + bindCfg := BindConfig{ + Version: "1.2.3", + } + + assert.Equal(t, bindCfg.Version, bindCfg.MetricVersionLabel()) + }) + t.Run(`no version, digest set`, func(t *testing.T) { + bindCfg := BindConfig{ + ImageDigest: "sha256:123", + } + + assert.Equal(t, bindCfg.ImageDigest, bindCfg.MetricVersionLabel()) + }) +} diff --git a/pkg/controllers/csi/driver/volumes/host/publisher.go b/pkg/controllers/csi/driver/volumes/host/publisher.go index 77629bec0d..8dcf1ea5c6 100644 --- a/pkg/controllers/csi/driver/volumes/host/publisher.go +++ b/pkg/controllers/csi/driver/volumes/host/publisher.go @@ -18,22 +18,20 @@ package hostvolumes import ( "context" - goerrors "errors" "fmt" "os" + "time" csivolumes "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/driver/volumes" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/pkg/errors" "github.com/spf13/afero" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - "gorm.io/gorm" - "k8s.io/utils/mount" + mount "k8s.io/mount-utils" ) -const failedToGetOsAgentVolumePrefix = "failed to get OSMount from database: " +const failedToGetOsAgentVolumePrefix = "failed to get osagent volume info from database: " func NewHostVolumePublisher(fs afero.Afero, mounter mount.Interface, db metadata.Access, path metadata.PathResolver) csivolumes.Publisher { return &HostVolumePublisher{ @@ -56,105 +54,81 @@ type HostVolumePublisher struct { path metadata.PathResolver } -func (publisher *HostVolumePublisher) PublishVolume(ctx context.Context, volumeCfg csivolumes.VolumeConfig) (*csi.NodePublishVolumeResponse, error) { - tenantConfig, err := publisher.db.ReadTenantConfig(metadata.TenantConfig{Name: volumeCfg.DynakubeName}) +func (publisher *HostVolumePublisher) PublishVolume(ctx context.Context, volumeCfg *csivolumes.VolumeConfig) (*csi.NodePublishVolumeResponse, error) { + bindCfg, err := csivolumes.NewBindConfig(ctx, publisher.db, volumeCfg) if err != nil { - return nil, status.Error(codes.Internal, "failed to read tenant-config: "+err.Error()) + return nil, err } - osMount, err := publisher.db.ReadUnscopedOSMount(metadata.OSMount{TenantUUID: tenantConfig.TenantUUID}) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return nil, status.Error(codes.Internal, failedToGetOsAgentVolumePrefix+err.Error()) + if err := publisher.mountOneAgent(bindCfg.TenantUUID, volumeCfg); err != nil { + return nil, status.Error(codes.Internal, "failed to mount osagent volume: "+err.Error()) } - if osMount != nil { - if !osMount.DeletedAt.Valid { - // If the OSAgents were removed forcefully, we might not get the unmount request, so we can't fully relay on the database, and have to directly check if its mounted or not - isNotMounted, err := publisher.isNotMounted(publisher.mounter, osMount.Location) - if err != nil { - return nil, err - } - - if !isNotMounted { - return &csi.NodePublishVolumeResponse{}, goerrors.New("previous OSMount is yet to be unmounted, there can be only 1 OSMount per tenant per node, blocking until unmount") // don't want to have the stacktrace here, it just pollutes the logs - } - } - - osMount.VolumeMeta = 
metadata.VolumeMeta{ - ID: volumeCfg.VolumeID, - PodName: volumeCfg.PodName, - } - osMount.TenantConfig = *tenantConfig + volume, err := publisher.db.GetOsAgentVolumeViaTenantUUID(ctx, bindCfg.TenantUUID) + if err != nil { + return nil, status.Error(codes.Internal, failedToGetOsAgentVolumePrefix+err.Error()) + } - if err := publisher.mountOneAgent(osMount, volumeCfg); err != nil { - return nil, status.Error(codes.Internal, "failed to mount OSMount: "+err.Error()) + timestamp := time.Now() + if volume == nil { + storage := metadata.OsAgentVolume{ + VolumeID: volumeCfg.VolumeID, + TenantUUID: bindCfg.TenantUUID, + Mounted: true, + LastModified: ×tamp, } - - _, err = publisher.db.RestoreOSMount(osMount) - if err != nil { - return nil, status.Error(codes.Internal, "failed to restore OSMount: "+err.Error()) + if err := publisher.db.InsertOsAgentVolume(ctx, &storage); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("failed to insert osagent volume info to database. info: %v err: %s", storage, err.Error())) } - - return &csi.NodePublishVolumeResponse{}, nil } else { - osMount := metadata.OSMount{ - VolumeMeta: metadata.VolumeMeta{ID: volumeCfg.VolumeID, PodName: volumeCfg.PodName}, - VolumeMetaID: volumeCfg.VolumeID, - TenantUUID: tenantConfig.TenantUUID, - Location: publisher.path.OsAgentDir(tenantConfig.TenantUUID), - MountAttempts: 0, - TenantConfig: *tenantConfig, - } - - if err := publisher.mountOneAgent(&osMount, volumeCfg); err != nil { - return nil, status.Error(codes.Internal, "failed to mount OSMount: "+err.Error()) - } + volume.VolumeID = volumeCfg.VolumeID + volume.Mounted = true + volume.LastModified = ×tamp - if err := publisher.db.CreateOSMount(&osMount); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("failed to insert OSMount to database. info: %v err: %s", osMount, err.Error())) + if err := publisher.db.UpdateOsAgentVolume(ctx, volume); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("failed to update osagent volume info to database. info: %v err: %s", volume, err.Error())) } } return &csi.NodePublishVolumeResponse{}, nil } -func (publisher *HostVolumePublisher) UnpublishVolume(ctx context.Context, volumeInfo csivolumes.VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) { - osMount, err := publisher.db.ReadOSMount(metadata.OSMount{VolumeMetaID: volumeInfo.VolumeID}) - - if errors.Is(err, gorm.ErrRecordNotFound) { - return &csi.NodeUnpublishVolumeResponse{}, nil - } - +func (publisher *HostVolumePublisher) UnpublishVolume(ctx context.Context, volumeInfo *csivolumes.VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) { + volume, err := publisher.db.GetOsAgentVolumeViaVolumeID(ctx, volumeInfo.VolumeID) if err != nil { return nil, status.Error(codes.Internal, failedToGetOsAgentVolumePrefix+err.Error()) } - if osMount == nil { + if volume == nil { return &csi.NodeUnpublishVolumeResponse{}, nil } - publisher.unmountOneAgent(volumeInfo.TargetPath) + publisher.umountOneAgent(volumeInfo.TargetPath) - if err := publisher.db.DeleteOSMount(&metadata.OSMount{TenantUUID: osMount.TenantUUID}); err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("failed to update OSMount to database. info: %v err: %s", osMount, err.Error())) + timestamp := time.Now() + volume.Mounted = false + volume.LastModified = ×tamp + + if err := publisher.db.UpdateOsAgentVolume(ctx, volume); err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("failed to update osagent volume info to database. 
info: %v err: %s", volume, err.Error())) } - log.Info("OSMount has been unpublished", "targetPath", volumeInfo.TargetPath) + log.Info("osagent volume has been unpublished", "targetPath", volumeInfo.TargetPath) return &csi.NodeUnpublishVolumeResponse{}, nil } -func (publisher *HostVolumePublisher) CanUnpublishVolume(ctx context.Context, volumeInfo csivolumes.VolumeInfo) (bool, error) { - volume, err := publisher.db.ReadOSMount(metadata.OSMount{VolumeMetaID: volumeInfo.VolumeID}) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { +func (publisher *HostVolumePublisher) CanUnpublishVolume(ctx context.Context, volumeInfo *csivolumes.VolumeInfo) (bool, error) { + volume, err := publisher.db.GetOsAgentVolumeViaVolumeID(ctx, volumeInfo.VolumeID) + if err != nil { return false, status.Error(codes.Internal, failedToGetOsAgentVolumePrefix+err.Error()) } return volume != nil, nil } -func (publisher *HostVolumePublisher) mountOneAgent(osMount *metadata.OSMount, volumeCfg csivolumes.VolumeConfig) error { - hostDir := osMount.Location +func (publisher *HostVolumePublisher) mountOneAgent(tenantUUID string, volumeCfg *csivolumes.VolumeConfig) error { + hostDir := publisher.path.OsAgentDir(tenantUUID) _ = publisher.fs.MkdirAll(hostDir, os.ModePerm) if err := publisher.fs.MkdirAll(volumeCfg.TargetPath, os.ModePerm); err != nil { @@ -170,7 +144,7 @@ func (publisher *HostVolumePublisher) mountOneAgent(osMount *metadata.OSMount, v return nil } -func (publisher *HostVolumePublisher) unmountOneAgent(targetPath string) { +func (publisher *HostVolumePublisher) umountOneAgent(targetPath string) { if err := publisher.mounter.Unmount(targetPath); err != nil { log.Error(err, "Unmount failed", "path", targetPath) } diff --git a/pkg/controllers/csi/driver/volumes/host/publisher_test.go b/pkg/controllers/csi/driver/volumes/host/publisher_test.go index c5b8b84e87..042ca4ba22 100644 --- a/pkg/controllers/csi/driver/volumes/host/publisher_test.go +++ b/pkg/controllers/csi/driver/volumes/host/publisher_test.go @@ -3,6 +3,7 @@ package hostvolumes import ( "context" "testing" + "time" dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" csivolumes "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/driver/volumes" @@ -10,7 +11,7 @@ import ( "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/mount" + mount "k8s.io/mount-utils" ) const ( @@ -36,8 +37,7 @@ func TestPublishVolume(t *testing.T) { assert.NotEmpty(t, mounter.MountPoints) assertReferencesForPublishedVolume(t, &publisher, mounter) }) - - t.Run("not ready dynakube", func(t *testing.T) { + t.Run(`not ready dynakube`, func(t *testing.T) { mounter := mount.NewFakeMounter([]mount.MountPoint{}) publisher := newPublisherForTesting(mounter) @@ -50,46 +50,6 @@ func TestPublishVolume(t *testing.T) { assert.NotEmpty(t, mounter.MountPoints) assertReferencesForPublishedVolume(t, &publisher, mounter) }) - - t.Run("publish volume when previous OSMount not yet unmounted (upgrade scenario) => error", func(t *testing.T) { - mounter := mount.NewFakeMounter([]mount.MountPoint{}) - publisher := newPublisherForTesting(mounter) - - mockDynakube(t, &publisher) - - response, err := publisher.PublishVolume(ctx, createTestVolumeConfig()) - - require.NoError(t, err) - assert.NotNil(t, response) - assert.NotEmpty(t, mounter.MountPoints) - assertReferencesForPublishedVolume(t, &publisher, mounter) - - response, err = publisher.PublishVolume(ctx, createTestVolumeConfig()) - require.Error(t, err) - 
assert.NotNil(t, response) - }) - - t.Run("publish volume when previous OSMount force deleted => do mount", func(t *testing.T) { - mounter := mount.NewFakeMounter([]mount.MountPoint{}) - publisher := newPublisherForTesting(mounter) - mockDynakube(t, &publisher) - - response, err := publisher.PublishVolume(ctx, createTestVolumeConfig()) - - require.NoError(t, err) - assert.NotNil(t, response) - assert.NotEmpty(t, mounter.MountPoints) - assertReferencesForPublishedVolume(t, &publisher, mounter) - - publisher.isNotMounted = func(mounter mount.Interface, file string) (bool, error) { - return true, nil - } - response, err = publisher.PublishVolume(ctx, createTestVolumeConfig()) - require.NoError(t, err) - assert.NotNil(t, response) - assert.NotEmpty(t, mounter.MountPoints) - assertReferencesForPublishedVolume(t, &publisher, mounter) - }) } func TestUnpublishVolume(t *testing.T) { @@ -120,9 +80,9 @@ func TestUnpublishVolume(t *testing.T) { assert.NotNil(t, response) assert.NotEmpty(t, mounter.MountPoints) - appMount, err := publisher.db.ReadOSMount(metadata.OSMount{VolumeMetaID: testVolumeId}) - require.Error(t, err) - assert.Nil(t, appMount) + volume, err := publisher.db.GetOsAgentVolumeViaVolumeID(context.Background(), testVolumeId) + require.NoError(t, err) + assert.Nil(t, volume) }) } @@ -165,48 +125,48 @@ func newPublisherForTesting(mounter *mount.FakeMounter) HostVolumePublisher { func mockPublishedvolume(t *testing.T, publisher *HostVolumePublisher) { mockDynakube(t, publisher) - osMount := metadata.OSMount{VolumeMetaID: testVolumeId, VolumeMeta: metadata.VolumeMeta{ID: testVolumeId}, TenantUUID: testTenantUUID} - err := publisher.db.CreateOSMount(&osMount) + now := time.Now() + err := publisher.db.InsertOsAgentVolume(context.Background(), metadata.NewOsAgentVolume(testVolumeId, testTenantUUID, true, &now)) require.NoError(t, err) } func mockDynakube(t *testing.T, publisher *HostVolumePublisher) { - tenantConfig := metadata.TenantConfig{Name: testDynakubeName, TenantUUID: testTenantUUID, DownloadedCodeModuleVersion: "some-version", MaxFailedMountAttempts: 0} - err := publisher.db.CreateTenantConfig(&tenantConfig) + err := publisher.db.InsertDynakube(context.Background(), metadata.NewDynakube(testDynakubeName, testTenantUUID, "some-version", "", 0)) require.NoError(t, err) } func mockDynakubeWithoutVersion(t *testing.T, publisher *HostVolumePublisher) { - tenantConfig := metadata.TenantConfig{Name: testDynakubeName, TenantUUID: testTenantUUID, DownloadedCodeModuleVersion: "", MaxFailedMountAttempts: 0} - err := publisher.db.CreateTenantConfig(&tenantConfig) + err := publisher.db.InsertDynakube(context.Background(), metadata.NewDynakube(testDynakubeName, testTenantUUID, "", "", 0)) require.NoError(t, err) } func assertReferencesForPublishedVolume(t *testing.T, publisher *HostVolumePublisher, mounter *mount.FakeMounter) { assert.NotEmpty(t, mounter.MountPoints) - volume, err := publisher.db.ReadOSMount(metadata.OSMount{VolumeMetaID: testVolumeId}) + volume, err := publisher.db.GetOsAgentVolumeViaVolumeID(context.Background(), testVolumeId) require.NoError(t, err) - assert.Equal(t, testVolumeId, volume.VolumeMetaID) + assert.Equal(t, testVolumeId, volume.VolumeID) assert.Equal(t, testTenantUUID, volume.TenantUUID) + assert.True(t, volume.Mounted) } func assertReferencesForUnpublishedVolume(t *testing.T, publisher *HostVolumePublisher) { - volume, err := publisher.db.ReadOSMount(metadata.OSMount{VolumeMetaID: testVolumeId}) - require.Error(t, err) - assert.Nil(t, volume) + volume, err := 
publisher.db.GetOsAgentVolumeViaVolumeID(context.Background(), testVolumeId) + require.NoError(t, err) + assert.NotNil(t, volume) + assert.False(t, volume.Mounted) } -func createTestVolumeConfig() csivolumes.VolumeConfig { - return csivolumes.VolumeConfig{ - VolumeInfo: createTestVolumeInfo(), +func createTestVolumeConfig() *csivolumes.VolumeConfig { + return &csivolumes.VolumeConfig{ + VolumeInfo: *createTestVolumeInfo(), Mode: Mode, DynakubeName: testDynakubeName, } } -func createTestVolumeInfo() csivolumes.VolumeInfo { - return csivolumes.VolumeInfo{ +func createTestVolumeInfo() *csivolumes.VolumeInfo { + return &csivolumes.VolumeInfo{ VolumeID: testVolumeId, TargetPath: testTargetPath, } diff --git a/pkg/controllers/csi/driver/volumes/publisher.go b/pkg/controllers/csi/driver/volumes/publisher.go index ea9b9168d2..4ca9547a09 100644 --- a/pkg/controllers/csi/driver/volumes/publisher.go +++ b/pkg/controllers/csi/driver/volumes/publisher.go @@ -7,7 +7,7 @@ import ( ) type Publisher interface { - PublishVolume(ctx context.Context, volumeCfg VolumeConfig) (*csi.NodePublishVolumeResponse, error) - UnpublishVolume(ctx context.Context, volumeInfo VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) - CanUnpublishVolume(ctx context.Context, volumeInfo VolumeInfo) (bool, error) + PublishVolume(ctx context.Context, volumeCfg *VolumeConfig) (*csi.NodePublishVolumeResponse, error) + UnpublishVolume(ctx context.Context, volumeInfo *VolumeInfo) (*csi.NodeUnpublishVolumeResponse, error) + CanUnpublishVolume(ctx context.Context, volumeInfo *VolumeInfo) (bool, error) } diff --git a/pkg/controllers/csi/driver/volumes/volume_config.go b/pkg/controllers/csi/driver/volumes/volume_config.go index 873e29b0a6..8b81cfb998 100644 --- a/pkg/controllers/csi/driver/volumes/volume_config.go +++ b/pkg/controllers/csi/driver/volumes/volume_config.go @@ -95,5 +95,5 @@ func ParseNodeUnpublishVolumeRequest(req *csi.NodeUnpublishVolumeRequest) (*Volu return nil, status.Error(codes.InvalidArgument, "Target path missing in request") } - return &VolumeInfo{VolumeID: volumeID, TargetPath: targetPath}, nil + return &VolumeInfo{volumeID, targetPath}, nil } diff --git a/pkg/controllers/csi/driver/volumes/volume_config_test.go b/pkg/controllers/csi/driver/volumes/volume_config_test.go index 3e31a804f7..fd59fbc303 100644 --- a/pkg/controllers/csi/driver/volumes/volume_config_test.go +++ b/pkg/controllers/csi/driver/volumes/volume_config_test.go @@ -9,10 +9,9 @@ import ( ) const ( - testVolumeId = "a-volume-id" - testTargetPath = "a-target-path" - testPodUID = "a-pod-uid" - testDynakubeName = "a-dynakube" + testVolumeId = "a-volume-id" + testTargetPath = "a-target-path" + testPodUID = "a-pod-uid" ) func TestCSIDriverServer_ParsePublishVolumeRequest(t *testing.T) { diff --git a/pkg/controllers/csi/gc/binaries.go b/pkg/controllers/csi/gc/binaries.go index e5a39a3d34..76f1d6fbe0 100644 --- a/pkg/controllers/csi/gc/binaries.go +++ b/pkg/controllers/csi/gc/binaries.go @@ -1,84 +1,165 @@ package csigc import ( + "context" "os" + "strings" - "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" + "github.com/pkg/errors" "github.com/spf13/afero" + mount "k8s.io/mount-utils" ) -func (gc *CSIGarbageCollector) runBinaryGarbageCollection() { - fs := &afero.Afero{Fs: gc.fs} +func (gc *CSIGarbageCollector) runBinaryGarbageCollection(ctx context.Context, tenantUUID string) error { + binDirs, err := gc.getSharedBinDirs() + if err != nil { + return err + } + + oldBinDirs, err := gc.getTenantBinDirs(tenantUUID) + if err != nil { + 
return err + } - gcRunsMetric.Inc() + binDirs = append(binDirs, oldBinDirs...) - codeModules, err := gc.db.ListDeletedCodeModules() + binsToDelete, err := gc.collectUnusedAgentBins(ctx, binDirs, tenantUUID) if err != nil { - log.Error(err, "failed to read deleted codemodules") + return err + } - return + if len(binsToDelete) == 0 { + log.Info("no shared binary dirs to delete on the node") + + return nil } - for _, codeModule := range codeModules { - if !gc.time.Now().Time.After(codeModule.DeletedAt.Time.Add(safeRemovalThreshold)) { - log.Info("skipping recently orphaned codemodule", "version", codeModule.Version, "location", codeModule.Location) + return gc.deleteBinDirs(binsToDelete) +} - continue - } +func (gc *CSIGarbageCollector) collectUnusedAgentBins(ctx context.Context, imageDirs []os.FileInfo, tenantUUID string) ([]string, error) { + var toDelete []string - isNotMounted, err := gc.isNotMounted(gc.mounter, codeModule.Location) - if err != nil { - log.Info("failed to determine if AppMount is still mounted", "location", codeModule.Location, "version", codeModule.Version, "err", err.Error()) + usedAgentVersions, err := gc.db.GetLatestVersions(ctx) + if err != nil { + log.Info("failed to get the used image versions") + + return nil, err + } + + usedAgentDigest, err := gc.db.GetUsedImageDigests(ctx) + if err != nil { + log.Info("failed to get the used image digests") + + return nil, err + } + mountedAgentBins, err := getRelevantOverlayMounts(gc.mounter, []string{gc.path.AgentBinaryDir(tenantUUID), gc.path.AgentSharedBinaryDirBase()}) + if err != nil { + log.Info("failed to get all mounted versions") + + return nil, err + } + + for _, imageDir := range imageDirs { + agentBin := imageDir.Name() + sharedPath := gc.path.AgentSharedBinaryDirForAgent(agentBin) + tenantPath := gc.path.AgentBinaryDirForVersion(tenantUUID, agentBin) + + switch { + case usedAgentVersions[agentBin]: // versions that may not be used, but a dynakube references it + continue + case usedAgentDigest[agentBin]: // images that may not be used, but a dynakube references it continue } - if !isNotMounted { - log.Info("AppMount is still mounted", "location", codeModule.Location, "version", codeModule.Version) + if !mountedAgentBins[sharedPath] { // based on mount, active shared codemodule mounts + toDelete = append(toDelete, sharedPath) + } - continue + if !mountedAgentBins[tenantPath] { // based on mount, active tenant codemodule mounts + toDelete = append(toDelete, tenantPath) } + } - log.Info("cleaning up orphaned codemodule binary", "version", codeModule.Version, "location", codeModule.Location) - removeUnusedVersion(fs, codeModule.Location) + return toDelete, nil +} - err = gc.db.PurgeCodeModule(&metadata.CodeModule{Version: codeModule.Version}) +func (gc *CSIGarbageCollector) deleteBinDirs(imageDirs []string) error { + for _, dir := range imageDirs { + err := gc.fs.RemoveAll(dir) if err != nil { - log.Error(err, "failed to delete codemodule database entry") + log.Info("failed to delete codemodule bin dir", "dir", dir) - return + return errors.WithStack(err) } + + log.Info("deleted codemodule bin dir", "dir", dir) } + + return nil +} + +func (gc *CSIGarbageCollector) getTenantBinDirs(tenantUUID string) ([]os.FileInfo, error) { + binPath := gc.path.AgentBinaryDir(tenantUUID) + + binDirs, err := afero.Afero{Fs: gc.fs}.ReadDir(binPath) + if os.IsNotExist(err) { + log.Info("no codemodule versions stored in deprecated path", "path", binPath) + + return nil, nil + } else if err != nil { + log.Info("failed to read codemodule 
versions stored in deprecated path", "path", binPath) + + return nil, errors.WithStack(err) + } + + return binDirs, nil } -func removeUnusedVersion(fs *afero.Afero, binaryPath string) { - size, _ := dirSize(fs, binaryPath) +func (gc *CSIGarbageCollector) getSharedBinDirs() ([]os.FileInfo, error) { + sharedPath := gc.path.AgentSharedBinaryDirBase() + + imageDirs, err := afero.Afero{Fs: gc.fs}.ReadDir(sharedPath) + if os.IsNotExist(err) { + log.Info("no shared codemodules stored ", "path", sharedPath) + + return nil, nil + } - err := fs.RemoveAll(binaryPath) if err != nil { - log.Error(err, "codemodule delete failed", "path", binaryPath) - } else { - foldersRemovedMetric.Inc() - reclaimedMemoryMetric.Add(float64(size)) + log.Info("failed to read shared image directory", "path", sharedPath) + + return nil, errors.WithStack(err) } - log.Info("removed outdate CodeModule binary", "location", binaryPath) + return imageDirs, nil } -func dirSize(fs *afero.Afero, path string) (int64, error) { - var size int64 +func getRelevantOverlayMounts(mounter mount.Interface, baseFolders []string) (map[string]bool, error) { + mountPoints, err := mounter.List() + if err != nil { + log.Error(err, "failed to list all mount points") - err := fs.Walk(path, func(_ string, info os.FileInfo, err error) error { - if err != nil { - return err - } + return nil, err + } - if !info.IsDir() { - size += info.Size() - } + relevantMounts := map[string]bool{} - return err - }) + for _, mountPoint := range mountPoints { + if mountPoint.Device == "overlay" { + for _, opt := range mountPoint.Opts { + for _, baseFolder := range baseFolders { + if strings.HasPrefix(opt, "lowerdir="+baseFolder) { + split := strings.Split(opt, "=") + relevantMounts[split[1]] = true + + break + } + } + } + } + } - return size, err + return relevantMounts, nil } diff --git a/pkg/controllers/csi/gc/binaries_test.go b/pkg/controllers/csi/gc/binaries_test.go index 81902958fa..dedfb527b6 100644 --- a/pkg/controllers/csi/gc/binaries_test.go +++ b/pkg/controllers/csi/gc/binaries_test.go @@ -1,172 +1,171 @@ package csigc import ( - "fmt" - "path/filepath" + "context" + "os" "testing" - "time" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" - "github.com/Dynatrace/dynatrace-operator/pkg/util/timeprovider" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/testutil" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "k8s.io/utils/mount" + mount "k8s.io/mount-utils" ) const ( - testTenantUUID = "asd12345" - testVersion1 = "1" - testVersion2 = "2" - testVersion3 = "3" - testRootDir = "/tmp" + testVersion = "some-version" ) var ( - testBinaryDir = filepath.Join(testRootDir, testTenantUUID, "bin") + testPathResolver = metadata.PathResolver{ + RootDir: "test", + } ) func TestRunBinaryGarbageCollection(t *testing.T) { - t.Run("succeeds when no version present", func(t *testing.T) { - resetMetrics() - - gc := NewMockGarbageCollector() - - gc.runBinaryGarbageCollection() - - assert.InDelta(t, 1, testutil.ToFloat64(gcRunsMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(foldersRemovedMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(reclaimedMemoryMetric), 0.01) + ctx := context.Background() + + t.Run("bad database", func(t *testing.T) { + testDir := testPathResolver.AgentSharedBinaryDirForAgent(testVersion) + fs := createTestDirs(t, testDir) + gc := CSIGarbageCollector{ + fs: fs, + db: &metadata.FakeFailDB{}, + path: testPathResolver, 
+ } + err := gc.runBinaryGarbageCollection(ctx, testTenantUUID) + require.Error(t, err) }) - t.Run("succeeds when no version available", func(t *testing.T) { - resetMetrics() - - gc := NewMockGarbageCollector() - _ = gc.fs.MkdirAll(testBinaryDir, 0770) - - gc.runBinaryGarbageCollection() - - assert.InDelta(t, 1, testutil.ToFloat64(gcRunsMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(foldersRemovedMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(reclaimedMemoryMetric), 0.01) + t.Run("no error on empty fs", func(t *testing.T) { + gc := CSIGarbageCollector{ + fs: afero.NewMemMapFs(), + mounter: mount.NewFakeMounter(nil), + db: metadata.FakeMemoryDB(), + } + err := gc.runBinaryGarbageCollection(ctx, testTenantUUID) + require.NoError(t, err) }) - t.Run("remove unused", func(t *testing.T) { - resetMetrics() - - gc := NewMockGarbageCollector() - gc.mockUnusedVersions(testVersion1, testVersion2, testVersion3) - gc.time.Set(time.Now().Add(2 * safeRemovalThreshold)) - - gc.runBinaryGarbageCollection() - - assert.InDelta(t, 1, testutil.ToFloat64(gcRunsMetric), 0.01) - assert.InDelta(t, 3, testutil.ToFloat64(foldersRemovedMetric), 0.01) + t.Run("deletes unused", func(t *testing.T) { + testSharedDir := testPathResolver.AgentSharedBinaryDirForAgent(testVersion) + testTenantBinDir := testPathResolver.AgentBinaryDirForVersion(testTenantUUID, testVersion) + fs := createTestDirs(t, testSharedDir, testTenantBinDir) + gc := CSIGarbageCollector{ + fs: fs, + db: metadata.FakeMemoryDB(), + mounter: mount.NewFakeMounter(nil), + path: testPathResolver, + } + err := gc.runBinaryGarbageCollection(ctx, testTenantUUID) + require.NoError(t, err) + _, err = fs.Stat(testSharedDir) + require.Error(t, err) + assert.True(t, os.IsNotExist(err)) - gc.assertVersionNotExists(t, testVersion1, testVersion3) + _, err = fs.Stat(testTenantBinDir) + require.Error(t, err) + assert.True(t, os.IsNotExist(err)) }) - t.Run("ignore recently deleted", func(t *testing.T) { - resetMetrics() - - gc := NewMockGarbageCollector() - gc.mockUnusedVersions(testVersion1, testVersion2, testVersion3) - - gc.runBinaryGarbageCollection() - - assert.InDelta(t, 1, testutil.ToFloat64(gcRunsMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(foldersRemovedMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(reclaimedMemoryMetric), 0.01) + t.Run("deletes nothing, because of dynakube metadata present", func(t *testing.T) { + testDir := testPathResolver.AgentSharedBinaryDirForAgent(testVersion) + fs := createTestDirs(t, testDir) + gc := CSIGarbageCollector{ + fs: fs, + db: metadata.FakeMemoryDB(), + mounter: mount.NewFakeMounter(nil), + } + gc.db.InsertDynakube(ctx, &metadata.Dynakube{ + Name: "test", + TenantUUID: "test", + LatestVersion: "test", + ImageDigest: testVersion, + }) + + err := gc.runBinaryGarbageCollection(ctx, testTenantUUID) + require.NoError(t, err) - gc.assertVersionExists(t, testVersion1, testVersion2, testVersion3) + _, err = fs.Stat(testDir) + require.NoError(t, err) }) - t.Run("ignore used", func(t *testing.T) { - resetMetrics() - - gc := NewMockGarbageCollector() - gc.mockUsedVersions(t, testVersion1, testVersion2, testVersion3) - - gc.runBinaryGarbageCollection() - - assert.InDelta(t, 1, testutil.ToFloat64(gcRunsMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(foldersRemovedMetric), 0.01) - assert.InDelta(t, 0, testutil.ToFloat64(reclaimedMemoryMetric), 0.01) + t.Run("deletes nothing, because of volume metadata present", func(t *testing.T) { + testDir := 
testPathResolver.AgentSharedBinaryDirForAgent(testVersion) + fs := createTestDirs(t, testDir) + gc := CSIGarbageCollector{ + fs: fs, + db: metadata.FakeMemoryDB(), + mounter: mount.NewFakeMounter(nil), + } + gc.db.InsertVolume(ctx, &metadata.Volume{ + VolumeID: "test", + TenantUUID: "test", + Version: testVersion, + PodName: "test", + }) + + err := gc.runBinaryGarbageCollection(ctx, testTenantUUID) + require.NoError(t, err) - gc.assertVersionExists(t, testVersion1, testVersion2, testVersion3) + _, err = fs.Stat(testDir) + require.NoError(t, err) }) -} + t.Run("deletes nothing, because directory is mounted", func(t *testing.T) { + testSharedDir := testPathResolver.AgentSharedBinaryDirForAgent(testVersion) + testTenantBinDir := testPathResolver.AgentBinaryDirForVersion(testTenantUUID, testVersion) + fs := createTestDirs(t, testSharedDir, testTenantBinDir) + gc := CSIGarbageCollector{ + fs: fs, + db: metadata.FakeMemoryDB(), + mounter: mount.NewFakeMounter([]mount.MountPoint{ + { + Type: "overlay", + Opts: []string{"upperdir=beep", "lowerdir=" + testSharedDir, "workdir=boop"}, + }, + { + Type: "overlay", + Opts: []string{"lowerdir=" + testTenantBinDir, "upperdir=beep", "workdir=boop"}, + }, + }), + } -func NewMockGarbageCollector() *CSIGarbageCollector { - return &CSIGarbageCollector{ - fs: afero.NewMemMapFs(), - db: metadata.FakeMemoryDB(), - path: metadata.PathResolver{RootDir: testRootDir}, - time: timeprovider.New(), - maxUnmountedVolumeAge: defaultMaxUnmountedCsiVolumeAge, - mounter: mount.NewFakeMounter([]mount.MountPoint{}), - isNotMounted: mockIsNotMounted(map[string]error{}), - } -} + err := gc.runBinaryGarbageCollection(ctx, testTenantUUID) + require.NoError(t, err) -func (gc *CSIGarbageCollector) mockUnusedVersions(versions ...string) { - _ = gc.fs.Mkdir(testBinaryDir, 0770) + _, err = fs.Stat(testSharedDir) + require.NoError(t, err) - gc.isNotMounted = mockIsNotMounted(map[string]error{}) - for _, version := range versions { - gc.db.(metadata.Access).CreateCodeModule(&metadata.CodeModule{Version: version, Location: filepath.Join(testBinaryDir, version)}) - _, _ = gc.fs.Create(filepath.Join(testBinaryDir, version)) - gc.db.(metadata.Access).DeleteCodeModule(&metadata.CodeModule{Version: version}) - } + _, err = fs.Stat(testTenantBinDir) + require.NoError(t, err) + }) } -func (gc *CSIGarbageCollector) mockUsedVersions(t *testing.T, versions ...string) { - _ = gc.fs.Mkdir(testBinaryDir, 0770) - for i, version := range versions { - _, _ = gc.fs.Create(filepath.Join(testBinaryDir, version)) - appMount := metadata.AppMount{ - VolumeMeta: metadata.VolumeMeta{ID: fmt.Sprintf("volume%b", i), PodName: fmt.Sprintf("pod%b", i)}, - VolumeMetaID: fmt.Sprintf("volume%b", i), - CodeModuleVersion: version, - MountAttempts: 0, +func TestGetSharedImageDirs(t *testing.T) { + t.Run("no error on empty fs", func(t *testing.T) { + fs := afero.NewMemMapFs() + gc := CSIGarbageCollector{ + fs: fs, + path: testPathResolver, } - err := gc.db.(metadata.Access).CreateAppMount(&appMount) + dirs, err := gc.getSharedBinDirs() require.NoError(t, err) - - gc.db.(metadata.Access).CreateCodeModule(&metadata.CodeModule{Version: version, Location: filepath.Join(testBinaryDir, version)}) - } -} - -func (gc *CSIGarbageCollector) assertVersionNotExists(t *testing.T, versions ...string) { - for _, version := range versions { - exists, err := afero.Exists(gc.fs, filepath.Join(testBinaryDir, version)) - assert.False(t, exists) + assert.Nil(t, dirs) + }) + t.Run("get image cache dirs", func(t *testing.T) { + testDir := 
testPathResolver.AgentSharedBinaryDirForAgent(testVersion) + fs := createTestDirs(t, testDir) + gc := CSIGarbageCollector{ + fs: fs, + path: testPathResolver, + } + dirs, err := gc.getSharedBinDirs() require.NoError(t, err) - } + assert.Len(t, dirs, 1) + }) } -func (gc *CSIGarbageCollector) assertVersionExists(t *testing.T, versions ...string) { - for _, version := range versions { - exists, err := afero.Exists(gc.fs, filepath.Join(testBinaryDir, version)) - assert.True(t, exists) - require.NoError(t, err) +func createTestDirs(t *testing.T, paths ...string) afero.Fs { + fs := afero.NewMemMapFs() + for _, path := range paths { + require.NoError(t, fs.MkdirAll(path, 0755)) } -} -// This is a very ugly hack, but because you can't Set the value of a Counter metric you have to create new ones to reset them between runs. -func resetMetrics() { - gcRunsMetric = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "test", - Subsystem: "csi_driver", - Name: "gc_runs", - }) - foldersRemovedMetric = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "test", - Subsystem: "csi_driver", - Name: "gc_folder_rm", - }) - reclaimedMemoryMetric = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "test", - Subsystem: "csi_driver", - Name: "gc_memory_reclaimed", - }) + return fs } diff --git a/pkg/controllers/csi/gc/reconciler.go b/pkg/controllers/csi/gc/reconciler.go index 633462abcc..09126409b2 100644 --- a/pkg/controllers/csi/gc/reconciler.go +++ b/pkg/controllers/csi/gc/reconciler.go @@ -3,51 +3,43 @@ package csigc import ( "context" "os" - "path" "time" + dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" - "github.com/Dynatrace/dynatrace-operator/pkg/util/timeprovider" + "github.com/pkg/errors" "github.com/spf13/afero" - "k8s.io/utils/mount" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + mount "k8s.io/mount-utils" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // CSIGarbageCollector removes unused and outdated agent versions type CSIGarbageCollector struct { - apiReader client.Reader - fs afero.Fs - db metadata.Cleaner - mounter mount.Interface - time *timeprovider.Provider - isNotMounted mountChecker + apiReader client.Reader + fs afero.Fs + db metadata.Access + mounter mount.Interface path metadata.PathResolver maxUnmountedVolumeAge time.Duration } -// necessary for mocking, as the MounterMock will use the os package -type mountChecker func(mounter mount.Interface, file string) (bool, error) - var _ reconcile.Reconciler = (*CSIGarbageCollector)(nil) -const ( - safeRemovalThreshold = 5 * time.Minute -) - // NewCSIGarbageCollector returns a new CSIGarbageCollector -func NewCSIGarbageCollector(apiReader client.Reader, opts dtcsi.CSIOptions, db metadata.Cleaner) *CSIGarbageCollector { +func NewCSIGarbageCollector(apiReader client.Reader, opts dtcsi.CSIOptions, db metadata.Access) *CSIGarbageCollector { + mounter := mount.New("") + return &CSIGarbageCollector{ apiReader: apiReader, fs: afero.NewOsFs(), db: db, path: metadata.PathResolver{RootDir: opts.RootDir}, - time: timeprovider.New(), - mounter: mount.New(""), - isNotMounted: mount.IsNotMountPoint, + mounter: mounter, maxUnmountedVolumeAge: determineMaxUnmountedVolumeAge(os.Getenv(maxUnmountedCsiVolumeAgeEnv)), } } @@ -55,80 +47,61 @@ func NewCSIGarbageCollector(apiReader client.Reader, opts dtcsi.CSIOptions, db m 
func (gc *CSIGarbageCollector) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { log.Info("running OneAgent garbage collection", "namespace", request.Namespace, "name", request.Name) - log.Info("running binary garbage collection") - gc.runBinaryGarbageCollection() - - if err := ctx.Err(); err != nil { - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, err - } - - tenantConfigs, err := gc.db.ListDeletedTenantConfigs() + defaultReconcileResult := reconcile.Result{} + dynakube, err := getDynakubeFromRequest(ctx, gc.apiReader, request) if err != nil { - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, err + return defaultReconcileResult, err } - log.Info("running log garbage collection") - - for _, tenantConfig := range tenantConfigs { - log.Info("cleaning up soft deleted tenant-config", "name", tenantConfig.Name) + if dynakube == nil { + return defaultReconcileResult, nil + } - gc.runUnmountedVolumeGarbageCollection(tenantConfig.TenantUUID) + if !dynakube.NeedAppInjection() { + log.Info("app injection not enabled, skip garbage collection", "dynakube", dynakube.Name) - err := gc.runOSMountGarbageCollection(tenantConfig) - if err != nil { - continue - } + return defaultReconcileResult, nil + } - err = gc.db.PurgeTenantConfig(&tenantConfig) - if err != nil { - log.Info("failed to remove the soft deleted tenant-config entry, will try again", "name", tenantConfig.Name) + tenantUUID, err := dynakube.TenantUUIDFromApiUrl() + if err != nil { + log.Info("failed to get tenantUUID of DynaKube, checking later") - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, nil //nolint: nilerr - } + return defaultReconcileResult, err } + log.Info("running log garbage collection") + gc.runUnmountedVolumeGarbageCollection(tenantUUID) + if err := ctx.Err(); err != nil { - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, err + return defaultReconcileResult, err } - return reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, nil -} - -func (gc *CSIGarbageCollector) runOSMountGarbageCollection(tenantConfig metadata.TenantConfig) error { - osMounts, err := gc.db.ListDeletedOSMounts() - if err != nil { - return err - } + log.Info("running binary garbage collection") - for _, osm := range osMounts { - if !gc.time.Now().Time.After(osm.DeletedAt.Time.Add(safeRemovalThreshold)) { - log.Info("skipping recently removed os-mount", "location", osm.Location) + if err := gc.runBinaryGarbageCollection(ctx, tenantUUID); err != nil { + log.Info("failed to garbage collect the shared images") - continue - } + return defaultReconcileResult, err + } - if osm.TenantConfig.UID == tenantConfig.UID { - isNotMounted, err := gc.isNotMounted(gc.mounter, osm.Location) - if err != nil { - log.Info("failed to determine if OSMount is still mounted", "location", osm.Location, "tenantConfig", osm.TenantConfig.Name, "err", err.Error()) + return defaultReconcileResult, nil +} - continue - } +func getDynakubeFromRequest(ctx context.Context, apiReader client.Reader, request reconcile.Request) (*dynatracev1beta2.DynaKube, error) { + var dynakube dynatracev1beta2.DynaKube + if err := apiReader.Get(ctx, request.NamespacedName, &dynakube); err != nil { + if k8serrors.IsNotFound(err) { + log.Info("given DynaKube object not found") - if !isNotMounted { - log.Info("OSMount is still mounted", "location", osm.Location, "tenantConfig", osm.TenantConfig.Name) + return nil, nil //nolint: nilnil + } - continue - } + log.Info("failed to get DynaKube object") 
- dir, _ := afero.ReadDir(gc.fs, osm.Location) - for _, d := range dir { - gc.fs.RemoveAll(path.Join([]string{osm.Location, d.Name()}...)) - log.Info("removed outdate contents from OSMount folder", "location", osm.Location) - } - } + return nil, errors.WithStack(err) } - return nil + return &dynakube, nil } diff --git a/pkg/controllers/csi/gc/reconciler_test.go b/pkg/controllers/csi/gc/reconciler_test.go index 2b5d288f2a..d47f6ff311 100644 --- a/pkg/controllers/csi/gc/reconciler_test.go +++ b/pkg/controllers/csi/gc/reconciler_test.go @@ -7,14 +7,12 @@ import ( "github.com/Dynatrace/dynatrace-operator/pkg/api/scheme/fake" dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" - dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/mount" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) @@ -33,34 +31,13 @@ func TestReconcile(t *testing.T) { }, } gc := CSIGarbageCollector{ - apiReader: fake.NewClient(&dynakube), - fs: afero.NewMemMapFs(), - db: metadata.FakeMemoryDB(), - mounter: mount.NewFakeMounter([]mount.MountPoint{}), - isNotMounted: mockIsNotMounted(map[string]error{}), + apiReader: fake.NewClient(&dynakube), + fs: afero.NewMemMapFs(), + db: metadata.FakeMemoryDB(), } - result, err := gc.Reconcile(context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakube.Name}}) + result, err := gc.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakube.Name}}) require.NoError(t, err) - assert.Equal(t, reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, result) + assert.Equal(t, reconcile.Result{}, result) }) } - -// mockIsNotMounted is rather confusing because of the double negation. -// you can pass in a map of filepaths, each path will be considered as mounted if corresponding error value is nil. (so returns false) -// if the filepath was not provided in the map, then the path is considered as not mounted. (so returns true) -// if an error was provided for a filepath in the map, then that path will cause the return of that error. 
-func mockIsNotMounted(files map[string]error) mountChecker { - return func(mounter mount.Interface, file string) (bool, error) { - err, ok := files[file] - if !ok { - return true, nil // unknown path => not mounted, no mocked error - } - - if err == nil { - return false, nil // known path => mounted, no mocked error - } - - return false, err // mocked error for path - } -} diff --git a/pkg/controllers/csi/gc/unmounted.go b/pkg/controllers/csi/gc/unmounted.go index 0108e74f68..9696add7eb 100644 --- a/pkg/controllers/csi/gc/unmounted.go +++ b/pkg/controllers/csi/gc/unmounted.go @@ -13,7 +13,6 @@ const ( maxUnmountedCsiVolumeAgeEnv = "MAX_UNMOUNTED_VOLUME_AGE" ) -// TODO: Rework to use the database entries instead of the filesystem func (gc *CSIGarbageCollector) runUnmountedVolumeGarbageCollection(tenantUUID string) { unmountedVolumes, err := gc.getUnmountedVolumes(tenantUUID) if err != nil { diff --git a/pkg/controllers/csi/gc/unmounted_test.go b/pkg/controllers/csi/gc/unmounted_test.go index 943b7b8ec6..4cef241376 100644 --- a/pkg/controllers/csi/gc/unmounted_test.go +++ b/pkg/controllers/csi/gc/unmounted_test.go @@ -6,9 +6,20 @@ import ( "testing" "time" + "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" "github.com/spf13/afero" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + mount "k8s.io/mount-utils" +) + +const ( + testRootDir = "root-dir" + testTenantUUID = "tenant-12" + + testVersion1 = "v1" + testVersion2 = "v2" + testVersion3 = "v3" ) var ( @@ -161,3 +172,13 @@ func (gc *CSIGarbageCollector) mockUnmountedVolumeIDPath(volumeIDs ...string) { _ = gc.fs.MkdirAll(filepath.Join(testVolumeFolderPath, volumeID, "mapped"), os.ModePerm) } } + +func NewMockGarbageCollector(mountPoints ...mount.MountPoint) *CSIGarbageCollector { + return &CSIGarbageCollector{ + fs: afero.NewMemMapFs(), + db: metadata.FakeMemoryDB(), + path: metadata.PathResolver{RootDir: testRootDir}, + mounter: mount.NewFakeMounter(mountPoints), + maxUnmountedVolumeAge: defaultMaxUnmountedCsiVolumeAge, + } +} diff --git a/pkg/controllers/csi/metadata/cleaner.go b/pkg/controllers/csi/metadata/cleaner.go deleted file mode 100644 index a21e578b07..0000000000 --- a/pkg/controllers/csi/metadata/cleaner.go +++ /dev/null @@ -1,107 +0,0 @@ -package metadata - -import ( - "github.com/pkg/errors" -) - -type Cleaner interface { - ListDeletedTenantConfigs() ([]TenantConfig, error) - PurgeTenantConfig(tenantConfig *TenantConfig) error - - ListDeletedCodeModules() ([]CodeModule, error) - PurgeCodeModule(codeModule *CodeModule) error - - ListDeletedAppMounts() ([]AppMount, error) - PurgeAppMount(appMount *AppMount) error - - ListDeletedOSMounts() ([]OSMount, error) - PurgeOSMount(osMount *OSMount) error -} - -var _ Cleaner = &GormConn{} - -func (conn *GormConn) ListDeletedTenantConfigs() ([]TenantConfig, error) { - var tenantConfigs []TenantConfig - - result := conn.db.WithContext(conn.ctx).Unscoped().Where("deleted_at is not ?", nil).Find(&tenantConfigs) - if result.Error != nil { - return nil, result.Error - } - - return tenantConfigs, nil -} - -func (conn *GormConn) PurgeTenantConfig(tenantConfig *TenantConfig) error { - if (tenantConfig == nil || *tenantConfig == TenantConfig{}) { - return errors.New("Can't delete an empty TenantConfig") - } - - return conn.db.WithContext(conn.ctx).Unscoped().Delete(&TenantConfig{}, tenantConfig).Error -} - -func (conn *GormConn) ListDeletedCodeModules() ([]CodeModule, error) { - var codeModules []CodeModule - - result := 
conn.db.WithContext(conn.ctx).Unscoped().Where("deleted_at is not ?", nil).Find(&codeModules) - if result.Error != nil { - return nil, result.Error - } - - return codeModules, nil -} - -func (conn *GormConn) PurgeCodeModule(codeModule *CodeModule) error { - if (codeModule == nil || *codeModule == CodeModule{}) { - return errors.New("Can't delete an empty CodeModule") - } - - return conn.db.WithContext(conn.ctx).Unscoped().Delete(&CodeModule{}, codeModule).Error -} - -func (conn *GormConn) ListDeletedAppMounts() ([]AppMount, error) { - var appMounts []AppMount - - result := conn.db.WithContext(conn.ctx).Unscoped().Where("deleted_at is not ?", nil).Preload("VolumeMeta").Preload("CodeModule").Find(&appMounts) - if result.Error != nil { - return nil, result.Error - } - - return appMounts, nil -} - -func (conn *GormConn) PurgeAppMount(appMount *AppMount) error { - if (appMount == nil || *appMount == AppMount{}) { - return errors.New("Can't delete an empty AppMount") - } - - err := conn.db.WithContext(conn.ctx).Unscoped().Delete(&AppMount{}, appMount).Error - if err != nil { - return errors.New("couldn't purge app mount, err: " + err.Error()) - } - - return conn.db.WithContext(conn.ctx).Unscoped().Delete(&VolumeMeta{}, appMount.VolumeMeta).Error -} - -func (conn *GormConn) ListDeletedOSMounts() ([]OSMount, error) { - var osMounts []OSMount - - result := conn.db.WithContext(conn.ctx).Unscoped().Where("deleted_at is not ?", nil).Preload("VolumeMeta").Preload("TenantConfig").Find(&osMounts) - if result.Error != nil { - return nil, result.Error - } - - return osMounts, nil -} - -func (conn *GormConn) PurgeOSMount(osMount *OSMount) error { - if (osMount == nil || *osMount == OSMount{}) { - return errors.New("Can't delete an empty OSMount") - } - - err := conn.db.WithContext(conn.ctx).Unscoped().Delete(&OSMount{}, osMount).Error - if err != nil { - return errors.New("couldn't purge an os mount, err: " + err.Error()) - } - - return conn.db.WithContext(conn.ctx).Unscoped().Delete(&VolumeMeta{}, osMount.VolumeMeta).Error -} diff --git a/pkg/controllers/csi/metadata/cleaner_test.go b/pkg/controllers/csi/metadata/cleaner_test.go deleted file mode 100644 index e96dd8e197..0000000000 --- a/pkg/controllers/csi/metadata/cleaner_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package metadata - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func fillWithTenantConfigs(t *testing.T, db *GormConn, amount int) { - for i := range amount { - tConfig := generateTenantConfig(i) - err := db.CreateTenantConfig(tConfig) - require.NoError(t, err) - - if i%2 == 0 { - err := db.DeleteTenantConfig(tConfig, false) - require.NoError(t, err) - } - } -} - -func generateTenantConfig(i int) *TenantConfig { - return &TenantConfig{ - Name: fmt.Sprintf("tenant-%d", i), - DownloadedCodeModuleVersion: fmt.Sprintf("version-%d", i), - ConfigDirPath: fmt.Sprintf("path-%d", i), - TenantUUID: fmt.Sprintf("uuid-%d", i), - MaxFailedMountAttempts: int64(i), - } -} - -func TestListDeletedTenantConfigs(t *testing.T) { - t.Run("empty database => no error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - configs, err := db.ListDeletedTenantConfigs() - require.NoError(t, err) - assert.Empty(t, configs) - }) - - t.Run("only list deleted", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithTenantConfigs(t, db, initialLength) - - configs, err := db.ListDeletedTenantConfigs() - require.NoError(t, err) - 
assert.Len(t, configs, initialLength/2) - }) -} - -func TestPurgeTenantConfig(t *testing.T) { - t.Run("nil/empty input => error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - err = db.PurgeTenantConfig(nil) - require.Error(t, err) - - err = db.PurgeTenantConfig(&TenantConfig{}) - require.Error(t, err) - }) - - t.Run("delete everything", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithTenantConfigs(t, db, initialLength) - - for i := range initialLength { - err = db.PurgeTenantConfig(generateTenantConfig(i)) - require.NoError(t, err) - } - - tcs, err := db.ListDeletedTenantConfigs() - require.NoError(t, err) - assert.Empty(t, tcs) - }) -} - -func fillWithCodeModules(t *testing.T, db *GormConn, amount int) { - for i := range amount { - cm := generateCodeModule(i) - err := db.CreateCodeModule(cm) - require.NoError(t, err) - - if i%2 == 0 { - err := db.DeleteCodeModule(cm) - require.NoError(t, err) - } - } -} - -func generateCodeModule(i int) *CodeModule { - return &CodeModule{ - Version: fmt.Sprintf("version-%d", i), - Location: fmt.Sprintf("location-%d", i), - } -} - -func TestListDeletedCodeModules(t *testing.T) { - t.Run("empty database => no error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - cms, err := db.ListDeletedCodeModules() - require.NoError(t, err) - assert.Empty(t, cms) - }) - - t.Run("only list deleted", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithCodeModules(t, db, initialLength) - - cms, err := db.ListDeletedCodeModules() - require.NoError(t, err) - assert.Len(t, cms, initialLength/2) - }) -} - -func TestPurgeCodeModule(t *testing.T) { - t.Run("nil/empty input => error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - err = db.PurgeCodeModule(nil) - require.Error(t, err) - - err = db.PurgeCodeModule(&CodeModule{}) - require.Error(t, err) - }) - t.Run("delete everything", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithCodeModules(t, db, initialLength) - - for i := range initialLength { - err = db.PurgeCodeModule(generateCodeModule(i)) - require.NoError(t, err) - } - - cms, err := db.ListDeletedCodeModules() - require.NoError(t, err) - assert.Empty(t, cms) - }) -} - -func fillWithAppMounts(t *testing.T, db *GormConn, amount int) { - for i := range amount { - am := generateAppMount(i) - err := db.CreateAppMount(am) - require.NoError(t, err) - - if i%2 == 0 { - err := db.DeleteAppMount(am) - require.NoError(t, err) - } - } -} - -func generateAppMount(i int) *AppMount { - return &AppMount{ - VolumeMetaID: fmt.Sprintf("id-%d", i), - VolumeMeta: VolumeMeta{ID: fmt.Sprintf("id-%d", i)}, - CodeModule: CodeModule{Version: fmt.Sprintf("version-%d", i)}, - CodeModuleVersion: fmt.Sprintf("version-%d", i), - Location: fmt.Sprintf("location-%d", i), - } -} - -func TestListDeletedAppMounts(t *testing.T) { - t.Run("empty database => no error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - mounts, err := db.ListDeletedAppMounts() - require.NoError(t, err) - assert.Empty(t, mounts) - }) - t.Run("only list deleted", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithAppMounts(t, db, initialLength) - - ams, err := db.ListDeletedAppMounts() - require.NoError(t, err) - assert.Len(t, ams, initialLength/2) - }) -} - -func TestPurgeAppMount(t *testing.T) { - 
t.Run("nil/empty input => error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - err = db.PurgeAppMount(nil) - require.Error(t, err) - - err = db.PurgeAppMount(&AppMount{}) - require.Error(t, err) - }) - t.Run("delete everything", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithAppMounts(t, db, initialLength) - - for i := range initialLength { - err = db.PurgeAppMount(generateAppMount(i)) - require.NoError(t, err) - } - - tcs, err := db.ListDeletedAppMounts() - require.NoError(t, err) - assert.Empty(t, tcs) - }) -} - -func TestListDeletedOSMounts(t *testing.T) { - t.Run("empty database => no error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - mounts, err := db.ListDeletedOSMounts() - require.NoError(t, err) - assert.Empty(t, mounts) - }) - t.Run("only list deleted", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 7 - fillWithOSMounts(t, db, initialLength) - - oms, err := db.ListDeletedOSMounts() - require.NoError(t, err) - assert.Len(t, oms, 2) - - osMount, err := db.ReadOSMount(OSMount{VolumeMetaID: "restore"}) - require.NoError(t, err) - assert.NotEmpty(t, osMount) - - osMount, err = db.ReadOSMount(OSMount{TenantConfig: TenantConfig{Name: "restore"}}) - require.NoError(t, err) - assert.NotEmpty(t, osMount) - }) -} - -func TestPurgeOSMount(t *testing.T) { - t.Run("nil/empty input => error", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - err = db.PurgeOSMount(nil) - require.Error(t, err) - - err = db.PurgeOSMount(&OSMount{}) - require.Error(t, err) - }) - t.Run("delete everything", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - initialLength := 6 - fillWithOSMounts(t, db, initialLength) - - for i := range initialLength { - // The actual UID is auto-generated at time of insert, so here you have to do some workarounds, because if the UID doesn't match it wont be deleted - tmp := generateOSMount(i) - err = db.PurgeOSMount(&OSMount{TenantUUID: tmp.TenantUUID, VolumeMeta: VolumeMeta{ID: tmp.VolumeMetaID}}) - require.NoError(t, err) - } - - tcs, err := db.ListDeletedOSMounts() - require.NoError(t, err) - assert.Empty(t, tcs) - }) -} - -func fillWithOSMounts(t *testing.T, db *GormConn, amount int) { - for i := range amount { - om := generateOSMount(i) - err := db.CreateOSMount(om) - require.NoError(t, err) - - if i%2 == 0 { - err := db.DeleteOSMount(om) - require.NoError(t, err) - } - - if i%4 == 0 { - tmp, err := db.ReadUnscopedOSMount(OSMount{TenantUUID: om.TenantUUID}) - require.NoError(t, err) - assert.NotNil(t, tmp) - tmp.VolumeMeta = VolumeMeta{ID: "restore"} - tmp.TenantConfig = TenantConfig{Name: "restore"} - tmp, err = db.RestoreOSMount(tmp) - require.NoError(t, err) - require.NotNil(t, tmp) - } - } -} - -func generateOSMount(i int) *OSMount { - return &OSMount{ - VolumeMetaID: fmt.Sprintf("id-%d", i), - VolumeMeta: VolumeMeta{ID: fmt.Sprintf("id-%d", i)}, - TenantConfigUID: fmt.Sprintf("t-id-%d", i), - TenantConfig: TenantConfig{UID: fmt.Sprintf("t-id-%d", i), TenantUUID: fmt.Sprintf("uuid-%d", i)}, - Location: fmt.Sprintf("location-%d", i), - TenantUUID: fmt.Sprintf("uuid-%d", i), - MountAttempts: int64(i), - } -} diff --git a/pkg/controllers/csi/metadata/correctness.go b/pkg/controllers/csi/metadata/correctness.go index 1910b31b62..b4b0d4798d 100644 --- a/pkg/controllers/csi/metadata/correctness.go +++ b/pkg/controllers/csi/metadata/correctness.go @@ -44,10 +44,12 @@ func 
(checker *CorrectnessChecker) CorrectCSI(ctx context.Context) error { return err } - if err := checker.copyCodeModulesFromDeprecatedBin(); err != nil { + if err := checker.copyCodeModulesFromDeprecatedBin(ctx); err != nil { return err } + checker.migrateAppMounts(ctx) + return nil } @@ -59,27 +61,25 @@ func (checker *CorrectnessChecker) removeVolumesForMissingPods(ctx context.Conte return nil } - appMounts, err := checker.access.ReadAppMounts() + podNames, err := checker.access.GetPodNames(ctx) if err != nil { return err } pruned := []string{} - for _, appMount := range appMounts { + for podName := range podNames { var pod corev1.Pod - - if err := checker.apiReader.Get(ctx, client.ObjectKey{Name: appMount.VolumeMeta.PodName}, &pod); !k8serrors.IsNotFound(err) { + if err := checker.apiReader.Get(ctx, client.ObjectKey{Name: podName}, &pod); !k8serrors.IsNotFound(err) { continue } - volumeID := appMount.VolumeMeta.ID - - if err := checker.access.DeleteAppMount(&AppMount{VolumeMetaID: appMount.VolumeMetaID}); err != nil { + volumeID := podNames[podName] + if err := checker.access.DeleteVolume(ctx, volumeID); err != nil { return err } - pruned = append(pruned, volumeID+"|"+appMount.VolumeMeta.PodName) + pruned = append(pruned, volumeID+"|"+podName) } log.Info("CSI volumes database is corrected for missing pods (volume|pod)", "prunedRows", pruned) @@ -95,26 +95,25 @@ func (checker *CorrectnessChecker) removeMissingDynakubes(ctx context.Context) e return nil } - tenantConfigs, err := checker.access.ReadTenantConfigs() + dynakubes, err := checker.access.GetTenantsToDynakubes(ctx) if err != nil { return err } pruned := []string{} - for _, tenantConfig := range tenantConfigs { + for dynakubeName := range dynakubes { var dynakube dynatracev1beta2.DynaKube - - if err := checker.apiReader.Get(ctx, client.ObjectKey{Name: tenantConfig.Name}, &dynakube); !k8serrors.IsNotFound(err) { + if err := checker.apiReader.Get(ctx, client.ObjectKey{Name: dynakubeName}, &dynakube); !k8serrors.IsNotFound(err) { continue } - if err := checker.access.DeleteTenantConfig(&TenantConfig{Name: tenantConfig.Name}, true); err != nil { + if err := checker.access.DeleteDynakube(ctx, dynakubeName); err != nil { return err } - tenantUUID := tenantConfig.TenantUUID - pruned = append(pruned, tenantUUID+"|"+tenantConfig.Name) + tenantUUID := dynakubes[dynakubeName] + pruned = append(pruned, tenantUUID+"|"+dynakubeName) } log.Info("CSI tenants database is corrected for missing dynakubes (tenant|dynakube)", "prunedRows", pruned) @@ -122,21 +121,21 @@ func (checker *CorrectnessChecker) removeMissingDynakubes(ctx context.Context) e return nil } -func (checker *CorrectnessChecker) copyCodeModulesFromDeprecatedBin() error { - tenantConfigs, err := checker.access.ReadTenantConfigs() +func (checker *CorrectnessChecker) copyCodeModulesFromDeprecatedBin(ctx context.Context) error { + dynakubes, err := checker.access.GetAllDynakubes(ctx) if err != nil { return err } moved := []string{} - for _, tenantConfig := range tenantConfigs { - if tenantConfig.TenantUUID == "" || tenantConfig.DownloadedCodeModuleVersion == "" { + for _, dynakube := range dynakubes { + if dynakube.TenantUUID == "" || dynakube.LatestVersion == "" { continue } - deprecatedBin := checker.path.AgentBinaryDirForVersion(tenantConfig.TenantUUID, tenantConfig.DownloadedCodeModuleVersion) - currentBin := checker.path.AgentSharedBinaryDirForAgent(tenantConfig.DownloadedCodeModuleVersion) + deprecatedBin := checker.path.AgentBinaryDirForVersion(dynakube.TenantUUID, 
dynakube.LatestVersion) + currentBin := checker.path.AgentSharedBinaryDirForAgent(dynakube.LatestVersion) linked, err := checker.safelyLinkCodeModule(deprecatedBin, currentBin) if err != nil { @@ -144,7 +143,7 @@ func (checker *CorrectnessChecker) copyCodeModulesFromDeprecatedBin() error { } if linked { - moved = append(moved, tenantConfig.TenantUUID+"|"+tenantConfig.DownloadedCodeModuleVersion) + moved = append(moved, dynakube.TenantUUID+"|"+dynakube.LatestVersion) } } @@ -185,6 +184,25 @@ func (checker *CorrectnessChecker) safelyLinkCodeModule(deprecatedBin, currentBi return false, nil } +func (checker *CorrectnessChecker) migrateAppMounts(ctx context.Context) { + volumes := checker.access.GetAllAppMounts(ctx) + + for _, volume := range volumes { + err := checker.access.InsertVolume(ctx, volume) + if err != nil { + log.Info("failed to insert volume", "id", volume.VolumeID, "error", err) + + continue + } + + // we need to prevent filling the DB with entries if the CSI Pod is restarted + err = checker.access.DeleteAppMount(ctx, volume.VolumeID) + if err != nil { + log.Info("failed to delete app_mount entry", "id", volume.VolumeID, "error", err) + } + } +} + func folderExists(fs afero.Fs, filename string) bool { info, err := fs.Stat(filename) if os.IsNotExist(err) { diff --git a/pkg/controllers/csi/metadata/correctness_test.go b/pkg/controllers/csi/metadata/correctness_test.go index ba24f8b412..266f5d24ec 100644 --- a/pkg/controllers/csi/metadata/correctness_test.go +++ b/pkg/controllers/csi/metadata/correctness_test.go @@ -9,46 +9,39 @@ import ( "github.com/Dynatrace/dynatrace-operator/pkg/api/scheme/fake" dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" - testutil "github.com/Dynatrace/dynatrace-operator/pkg/util/testing" - "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gorm.io/gorm" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func createTestTenantConfig(index int) *TenantConfig { - return &TenantConfig{ - Name: fmt.Sprintf("dk%d", index), - TenantUUID: fmt.Sprintf("asc%d", index), - DownloadedCodeModuleVersion: fmt.Sprintf("%d", 123*index), - MaxFailedMountAttempts: int64(index), - TimeStampedModel: TimeStampedModel{}, +func createTestDynakube(index int) Dynakube { + return Dynakube{ + TenantUUID: fmt.Sprintf("asc%d", index), + LatestVersion: strconv.Itoa(123 * index), + Name: fmt.Sprintf("dk%d", index), + ImageDigest: fmt.Sprintf("sha256:%d", 123*index), + MaxFailedMountAttempts: index, } } -func createTestAppMount(index int) *AppMount { - return &AppMount{ - VolumeMeta: VolumeMeta{ID: fmt.Sprintf("vol-%d", index), PodName: fmt.Sprintf("pod%d", index)}, - CodeModuleVersion: strconv.Itoa(123 * index), - CodeModule: CodeModule{Version: strconv.Itoa(123 * index)}, - VolumeMetaID: fmt.Sprintf("vol-%d", index), - MountAttempts: int64(index), - TimeStampedModel: TimeStampedModel{}, +func createTestVolume(index int) Volume { + return Volume{ + VolumeID: fmt.Sprintf("vol-%d", index), + PodName: fmt.Sprintf("pod%d", index), + Version: createTestDynakube(index).LatestVersion, + TenantUUID: createTestDynakube(index).TenantUUID, + MountAttempts: index, } } func TestCorrectCSI(t *testing.T) { - diffOptsAppMount := cmpopts.IgnoreFields(AppMount{}, "TimeStampedModel") - diffOptsTenantConfig := cmpopts.IgnoreFields(TenantConfig{}, "TimeStampedModel") - t.Run("error on no db or missing tables", func(t 
*testing.T) { db := emptyMemoryDB() checker := NewCorrectnessChecker(nil, db, dtcsi.CSIOptions{}) - err := checker.CorrectCSI(context.Background()) + err := checker.CorrectCSI(context.TODO()) require.Error(t, err) }) @@ -57,114 +50,114 @@ func TestCorrectCSI(t *testing.T) { checker := NewCorrectnessChecker(nil, db, dtcsi.CSIOptions{}) - err := checker.CorrectCSI(context.Background()) + err := checker.CorrectCSI(context.TODO()) require.NoError(t, err) }) t.Run("no error on nil apiReader, database is not cleaned", func(t *testing.T) { - testAppMount1 := createTestAppMount(1) - testTenantConfig1 := createTestTenantConfig(1) + ctx := context.TODO() + testVolume1 := createTestVolume(1) + testDynakube1 := createTestDynakube(1) db := FakeMemoryDB() - db.CreateAppMount(testAppMount1) - db.CreateTenantConfig(testTenantConfig1) + db.InsertVolume(ctx, &testVolume1) + db.InsertDynakube(ctx, &testDynakube1) checker := NewCorrectnessChecker(nil, db, dtcsi.CSIOptions{}) - err := checker.CorrectCSI(context.Background()) + err := checker.CorrectCSI(context.TODO()) require.NoError(t, err) - appMount, err := db.ReadAppMount(*testAppMount1) + vol, err := db.GetVolume(ctx, testVolume1.VolumeID) require.NoError(t, err) - testutil.PartialEqual(t, testAppMount1, appMount, diffOptsAppMount) + assert.Equal(t, &testVolume1, vol) - tenantConfig, err := db.ReadTenantConfig(TenantConfig{Name: testTenantConfig1.Name}) require.NoError(t, err) - testutil.PartialEqual(t, testTenantConfig1, tenantConfig, diffOptsTenantConfig) + dk, err := db.GetDynakube(ctx, testDynakube1.Name) + require.NoError(t, err) + assert.Equal(t, &testDynakube1, dk) }) t.Run("nothing to remove, everything is still correct", func(t *testing.T) { - ctx := context.Background() - testAppMount1 := createTestAppMount(1) - testTenantConfig1 := createTestTenantConfig(1) + ctx := context.TODO() + testVolume1 := createTestVolume(1) + testDynakube1 := createTestDynakube(1) db := FakeMemoryDB() - db.CreateAppMount(testAppMount1) - db.CreateTenantConfig(testTenantConfig1) + db.InsertVolume(ctx, &testVolume1) + db.InsertDynakube(ctx, &testDynakube1) client := fake.NewClient( - &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: testAppMount1.VolumeMeta.PodName}}, - &dynatracev1beta2.DynaKube{ObjectMeta: metav1.ObjectMeta{Name: testTenantConfig1.Name}}, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: testVolume1.PodName}}, + &dynatracev1beta2.DynaKube{ObjectMeta: metav1.ObjectMeta{Name: testDynakube1.Name}}, ) checker := NewCorrectnessChecker(client, db, dtcsi.CSIOptions{}) err := checker.CorrectCSI(ctx) - require.NoError(t, err) - appMount, err := db.ReadAppMount(*testAppMount1) require.NoError(t, err) - testutil.PartialEqual(t, testAppMount1, appMount, diffOptsAppMount) + vol, err := db.GetVolume(ctx, testVolume1.VolumeID) + require.NoError(t, err) + assert.Equal(t, &testVolume1, vol) require.NoError(t, err) - tenantConfig, err := db.ReadTenantConfig(TenantConfig{Name: testTenantConfig1.Name}) + dk, err := db.GetDynakube(ctx, testDynakube1.Name) require.NoError(t, err) - - testutil.PartialEqual(t, testTenantConfig1, tenantConfig, diffOptsTenantConfig) + assert.Equal(t, &testDynakube1, dk) }) t.Run("remove unnecessary entries in the filesystem", func(t *testing.T) { - testAppMount1 := createTestAppMount(1) - testAppMount2 := createTestAppMount(2) - testAppMount3 := createTestAppMount(3) + ctx := context.TODO() + testVolume1 := createTestVolume(1) + testVolume2 := createTestVolume(2) + testVolume3 := createTestVolume(3) - testTenantConfig1 := 
createTestTenantConfig(1) - testTenantConfig2 := createTestTenantConfig(2) - testTenantConfig3 := createTestTenantConfig(3) + testDynakube1 := createTestDynakube(1) + testDynakube2 := createTestDynakube(2) + testDynakube3 := createTestDynakube(3) db := FakeMemoryDB() - db.CreateAppMount(testAppMount1) - db.CreateAppMount(testAppMount2) - db.CreateAppMount(testAppMount3) - db.CreateTenantConfig(testTenantConfig1) - db.CreateTenantConfig(testTenantConfig2) - db.CreateTenantConfig(testTenantConfig3) + db.InsertVolume(ctx, &testVolume1) + db.InsertVolume(ctx, &testVolume2) + db.InsertVolume(ctx, &testVolume3) + db.InsertDynakube(ctx, &testDynakube1) + db.InsertDynakube(ctx, &testDynakube2) + db.InsertDynakube(ctx, &testDynakube3) client := fake.NewClient( - &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: testAppMount1.VolumeMeta.PodName}}, - &dynatracev1beta2.DynaKube{ObjectMeta: metav1.ObjectMeta{Name: testTenantConfig1.Name}}, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: testVolume1.PodName}}, + &dynatracev1beta2.DynaKube{ObjectMeta: metav1.ObjectMeta{Name: testDynakube1.Name}}, ) checker := NewCorrectnessChecker(client, db, dtcsi.CSIOptions{}) - err := checker.CorrectCSI(context.Background()) + err := checker.CorrectCSI(ctx) require.NoError(t, err) - testAppMount1.TimeStampedModel = TimeStampedModel{} - appMount, err := db.ReadAppMount(*testAppMount1) + vol, err := db.GetVolume(ctx, testVolume1.VolumeID) require.NoError(t, err) - testutil.PartialEqual(t, &testAppMount1, &appMount, diffOptsAppMount) + assert.Equal(t, &testVolume1, vol) - tenantConfig, err := db.ReadTenantConfig(TenantConfig{Name: testTenantConfig1.Name}) + ten, err := db.GetDynakube(ctx, testDynakube1.Name) require.NoError(t, err) - testutil.PartialEqual(t, &testTenantConfig1, &tenantConfig, diffOptsTenantConfig) + assert.Equal(t, &testDynakube1, ten) // PURGED - appMount, err = db.ReadAppMount(AppMount{VolumeMetaID: testAppMount2.VolumeMetaID}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - assert.Nil(t, appMount) + vol, err = db.GetVolume(ctx, testVolume2.VolumeID) + require.NoError(t, err) + assert.Nil(t, vol) // PURGED - testAppMount3.TimeStampedModel = TimeStampedModel{} - appMount, err = db.ReadAppMount(*testAppMount3) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - assert.Nil(t, appMount) + vol, err = db.GetVolume(ctx, testVolume3.VolumeID) + require.NoError(t, err) + assert.Nil(t, vol) // PURGED - tenantConfig, err = db.ReadTenantConfig(TenantConfig{Name: testTenantConfig2.TenantUUID}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - assert.Nil(t, tenantConfig) + ten, err = db.GetDynakube(ctx, testDynakube2.TenantUUID) + require.NoError(t, err) + assert.Nil(t, ten) // PURGED - tenantConfig, err = db.ReadTenantConfig(TenantConfig{Name: testTenantConfig3.TenantUUID}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - assert.Nil(t, tenantConfig) + ten, err = db.GetDynakube(ctx, testDynakube3.TenantUUID) + require.NoError(t, err) + assert.Nil(t, ten) }) } diff --git a/pkg/controllers/csi/metadata/fakes.go b/pkg/controllers/csi/metadata/fakes.go index 70f2bf1421..da034b8ff2 100644 --- a/pkg/controllers/csi/metadata/fakes.go +++ b/pkg/controllers/csi/metadata/fakes.go @@ -3,118 +3,115 @@ package metadata import ( "context" "database/sql" - - "gorm.io/driver/sqlite" - "gorm.io/gorm" ) -func emptyMemoryDB() *GormConn { - db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{}) - if err != nil { - return nil - } +func emptyMemoryDB() *SqliteAccess { + db := SqliteAccess{} + _ = 
db.connect(sqliteDriverName, ":memory:") + + return &db +} + +func FakeMemoryDB() *SqliteAccess { + db := SqliteAccess{} + ctx := context.Background() + _ = db.Setup(ctx, ":memory:") + _ = db.createTables(ctx) - return &GormConn{ctx: context.Background(), db: db} + return &db } -func FakeMemoryDB() *GormConn { - db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{}) +func checkIfTablesExist(db *SqliteAccess) bool { + var volumesTable string + + row := db.conn.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name=?;", volumesTableName) + + err := row.Scan(&volumesTable) if err != nil { - return nil + return false } - gormConn := &GormConn{ctx: context.Background(), db: db} + var tenantsTable string + + row = db.conn.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name=?;", dynakubesTableName) - err = gormConn.InitGormSchema() + err = row.Scan(&tenantsTable) if err != nil { - log.Error(err, "Couldn't initialize GORM schema") + return false + } - return nil + if tenantsTable != dynakubesTableName || volumesTable != volumesTableName { + return false } - return gormConn + return true } type FakeFailDB struct{} -func (f *FakeFailDB) SchemaMigration() error { +func (f *FakeFailDB) Setup(_ context.Context, _ string) error { return sql.ErrTxDone } +func (f *FakeFailDB) InsertDynakube(_ context.Context, _ *Dynakube) error { return sql.ErrTxDone } - -func (f *FakeFailDB) ReadTenantConfig(tenantConfig TenantConfig) (*TenantConfig, error) { - return nil, sql.ErrTxDone -} -func (f *FakeFailDB) ReadCodeModule(codeModule CodeModule) (*CodeModule, error) { - return nil, sql.ErrTxDone +func (f *FakeFailDB) UpdateDynakube(_ context.Context, _ *Dynakube) error { + return sql.ErrTxDone } -func (f *FakeFailDB) ReadOSMount(osMount OSMount) (*OSMount, error) { - return nil, sql.ErrTxDone +func (f *FakeFailDB) DeleteDynakube(_ context.Context, _ string) error { + return sql.ErrTxDone } -func (f *FakeFailDB) ReadUnscopedOSMount(osMount OSMount) (*OSMount, error) { +func (f *FakeFailDB) GetDynakube(_ context.Context, _ string) (*Dynakube, error) { return nil, sql.ErrTxDone } -func (f *FakeFailDB) ReadVolumeMeta(volumeMeta VolumeMeta) (*VolumeMeta, error) { +func (f *FakeFailDB) GetTenantsToDynakubes(_ context.Context) (map[string]string, error) { return nil, sql.ErrTxDone } -func (f *FakeFailDB) ReadAppMount(appMount AppMount) (*AppMount, error) { +func (f *FakeFailDB) GetAllDynakubes(_ context.Context) ([]*Dynakube, error) { return nil, sql.ErrTxDone } -func (f *FakeFailDB) ReadTenantConfigs() ([]TenantConfig, error) { - return nil, sql.ErrTxDone +func (f *FakeFailDB) InsertOsAgentVolume(_ context.Context, _ *OsAgentVolume) error { + return sql.ErrTxDone } -func (f *FakeFailDB) ReadCodeModules() ([]CodeModule, error) { +func (f *FakeFailDB) GetOsAgentVolumeViaVolumeID(_ context.Context, _ string) (*OsAgentVolume, error) { return nil, sql.ErrTxDone } -func (f *FakeFailDB) ReadOSMounts() ([]OSMount, error) { +func (f *FakeFailDB) GetOsAgentVolumeViaTenantUUID(_ context.Context, _ string) (*OsAgentVolume, error) { return nil, sql.ErrTxDone } -func (f *FakeFailDB) ReadAppMounts() ([]AppMount, error) { - return nil, sql.ErrTxDone +func (f *FakeFailDB) UpdateOsAgentVolume(_ context.Context, _ *OsAgentVolume) error { + return sql.ErrTxDone } -func (f *FakeFailDB) ReadVolumeMetas() ([]VolumeMeta, error) { +func (f *FakeFailDB) GetAllOsAgentVolumes(_ context.Context) ([]*OsAgentVolume, error) { return nil, sql.ErrTxDone } -func (f *FakeFailDB) CreateTenantConfig(tenantConfig 
*TenantConfig) error { - return sql.ErrTxDone -} -func (f *FakeFailDB) CreateCodeModule(codeModule *CodeModule) error { - return sql.ErrTxDone +func (f *FakeFailDB) InsertVolume(_ context.Context, _ *Volume) error { return sql.ErrTxDone } +func (f *FakeFailDB) DeleteVolume(_ context.Context, _ string) error { return sql.ErrTxDone } +func (f *FakeFailDB) GetVolume(_ context.Context, _ string) (*Volume, error) { + return nil, sql.ErrTxDone } -func (f *FakeFailDB) CreateOSMount(osMount *OSMount) error { - return sql.ErrTxDone +func (f *FakeFailDB) GetAllVolumes(_ context.Context) ([]*Volume, error) { return nil, sql.ErrTxDone } +func (f *FakeFailDB) GetPodNames(_ context.Context) (map[string]string, error) { + return nil, sql.ErrTxDone } -func (f *FakeFailDB) CreateAppMount(appMount *AppMount) error { - return sql.ErrTxDone +func (f *FakeFailDB) GetUsedVersions(_ context.Context, _ string) (map[string]bool, error) { + return nil, sql.ErrTxDone } - -func (f *FakeFailDB) UpdateTenantConfig(tenantConfig *TenantConfig) error { - return sql.ErrTxDone +func (f *FakeFailDB) GetAllUsedVersions(_ context.Context) (map[string]bool, error) { + return nil, sql.ErrTxDone } -func (f *FakeFailDB) UpdateOSMount(osMount *OSMount) error { - return sql.ErrTxDone +func (f *FakeFailDB) GetLatestVersions(_ context.Context) (map[string]bool, error) { + return nil, sql.ErrTxDone } -func (f *FakeFailDB) UpdateAppMount(appMount *AppMount) error { - return sql.ErrTxDone +func (f *FakeFailDB) GetAllAppMounts(_ context.Context) []*Volume { + return nil } +func (f *FakeFailDB) DeleteAppMount(_ context.Context, _ string) error { return nil } -func (f *FakeFailDB) DeleteTenantConfig(tenantConfig *TenantConfig, cascade bool) error { - return sql.ErrTxDone -} -func (f *FakeFailDB) DeleteCodeModule(codeModule *CodeModule) error { - return sql.ErrTxDone -} -func (f *FakeFailDB) DeleteOSMount(osMount *OSMount) error { - return sql.ErrTxDone -} -func (f *FakeFailDB) DeleteAppMount(appMount *AppMount) error { - return sql.ErrTxDone +func (f *FakeFailDB) GetUsedImageDigests(_ context.Context) (map[string]bool, error) { + return nil, sql.ErrTxDone } -func (f *FakeFailDB) IsCodeModuleOrphaned(codeModule *CodeModule) (bool, error) { +func (f *FakeFailDB) IsImageDigestUsed(_ context.Context, _ string) (bool, error) { return false, sql.ErrTxDone } -func (f *FakeFailDB) RestoreOSMount(osMount *OSMount) (*OSMount, error) { - return nil, sql.ErrTxDone -} diff --git a/pkg/controllers/csi/metadata/metadata.go b/pkg/controllers/csi/metadata/metadata.go new file mode 100644 index 0000000000..db14636269 --- /dev/null +++ b/pkg/controllers/csi/metadata/metadata.go @@ -0,0 +1,143 @@ +package metadata + +import ( + "context" + "time" +) + +// Dynakube stores the necessary info from the Dynakube that is needed to be used during volume mount/unmount. +type Dynakube struct { + Name string `json:"name"` + TenantUUID string `json:"tenantUUID"` + LatestVersion string `json:"latestVersion"` + ImageDigest string `json:"imageDigest"` + MaxFailedMountAttempts int `json:"maxFailedMountAttempts"` +} + +// NewDynakube returns a new metadata.Dynakube if all fields are set. 
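As an aside to the constructor restored below: NewDynakube (and NewVolume further down) reports missing required fields by returning nil rather than an error, so every call site has to nil-check before dereferencing. A minimal usage sketch, assuming only the signatures shown in this file; all concrete names and versions are made up:

package main

import (
	"fmt"

	"github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata"
)

func main() {
	// All required fields present -> a usable *Dynakube.
	dk := metadata.NewDynakube("my-dynakube", "tenant-uuid", "1.290.0", "sha256:abc", 10)
	if dk == nil {
		fmt.Println("name or tenantUUID missing")
		return
	}
	fmt.Println("stored dynakube:", dk.Name, dk.TenantUUID)

	// Missing tenantUUID -> nil; dereferencing without this check would panic.
	if metadata.NewDynakube("my-dynakube", "", "1.290.0", "", 10) == nil {
		fmt.Println("incomplete input is rejected by returning nil")
	}

	// NewVolume clamps negative mount attempts to 0 instead of rejecting them.
	vol := metadata.NewVolume("csi-volume-id", "my-pod", "1.290.0", "tenant-uuid", -1)
	fmt.Println("mount attempts:", vol.MountAttempts) // 0
}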
+func NewDynakube(dynakubeName, tenantUUID, latestVersion, imageDigest string, maxFailedMountAttempts int) *Dynakube { + if tenantUUID == "" || dynakubeName == "" { + return nil + } + + return &Dynakube{ + Name: dynakubeName, + TenantUUID: tenantUUID, + LatestVersion: latestVersion, + ImageDigest: imageDigest, + MaxFailedMountAttempts: maxFailedMountAttempts, + } +} + +type Volume struct { + VolumeID string `json:"volumeID" gorm:"column:ID"` + PodName string `json:"podName"` + Version string `json:"version"` + TenantUUID string `json:"tenantUUID"` + MountAttempts int `json:"mountAttempts"` +} + +// NewVolume returns a new Volume if all fields (except version) are set. +func NewVolume(id, podName, version, tenantUUID string, mountAttempts int) *Volume { + if id == "" || podName == "" || tenantUUID == "" { + return nil + } + + if mountAttempts < 0 { + mountAttempts = 0 + } + + return &Volume{ + VolumeID: id, + PodName: podName, + Version: version, + TenantUUID: tenantUUID, + MountAttempts: mountAttempts, + } +} + +type OsAgentVolume struct { + LastModified *time.Time `json:"lastModified"` + VolumeID string `json:"volumeID"` + TenantUUID string `json:"tenantUUID"` + Mounted bool `json:"mounted"` +} + +// NewOsAgentVolume returns a new volume if all fields are set. +func NewOsAgentVolume(volumeID, tenantUUID string, mounted bool, timeStamp *time.Time) *OsAgentVolume { + if volumeID == "" || tenantUUID == "" || timeStamp == nil { + return nil + } + + return &OsAgentVolume{VolumeID: volumeID, TenantUUID: tenantUUID, Mounted: mounted, LastModified: timeStamp} +} + +type Access interface { + Setup(ctx context.Context, path string) error + + InsertDynakube(ctx context.Context, dynakube *Dynakube) error + UpdateDynakube(ctx context.Context, dynakube *Dynakube) error + DeleteDynakube(ctx context.Context, dynakubeName string) error + GetDynakube(ctx context.Context, dynakubeName string) (*Dynakube, error) + GetTenantsToDynakubes(ctx context.Context) (map[string]string, error) + GetAllDynakubes(ctx context.Context) ([]*Dynakube, error) + + InsertOsAgentVolume(ctx context.Context, volume *OsAgentVolume) error + GetOsAgentVolumeViaVolumeID(ctx context.Context, volumeID string) (*OsAgentVolume, error) + GetOsAgentVolumeViaTenantUUID(ctx context.Context, volumeID string) (*OsAgentVolume, error) + UpdateOsAgentVolume(ctx context.Context, volume *OsAgentVolume) error + GetAllOsAgentVolumes(ctx context.Context) ([]*OsAgentVolume, error) + + InsertVolume(ctx context.Context, volume *Volume) error + DeleteVolume(ctx context.Context, volumeID string) error + GetVolume(ctx context.Context, volumeID string) (*Volume, error) + GetAllVolumes(ctx context.Context) ([]*Volume, error) + GetPodNames(ctx context.Context) (map[string]string, error) + GetUsedVersions(ctx context.Context, tenantUUID string) (map[string]bool, error) + GetAllUsedVersions(ctx context.Context) (map[string]bool, error) + GetLatestVersions(ctx context.Context) (map[string]bool, error) + GetUsedImageDigests(ctx context.Context) (map[string]bool, error) + IsImageDigestUsed(ctx context.Context, imageDigest string) (bool, error) + GetAllAppMounts(ctx context.Context) []*Volume + DeleteAppMount(ctx context.Context, appMountName string) error +} + +type AccessOverview struct { + Volumes []*Volume `json:"volumes"` + Dynakubes []*Dynakube `json:"dynakubes"` + OsAgentVolumes []*OsAgentVolume `json:"osAgentVolumes"` +} + +func NewAccessOverview(access Access) (*AccessOverview, error) { + ctx := context.Background() + + volumes, err := 
access.GetAllVolumes(ctx) + if err != nil { + return nil, err + } + + dynakubes, err := access.GetAllDynakubes(ctx) + if err != nil { + return nil, err + } + + osVolumes, err := access.GetAllOsAgentVolumes(ctx) + if err != nil { + return nil, err + } + + return &AccessOverview{ + Volumes: volumes, + Dynakubes: dynakubes, + OsAgentVolumes: osVolumes, + }, nil +} + +func LogAccessOverview(access Access) { + overview, err := NewAccessOverview(access) + if err != nil { + log.Error(err, "Failed to get an overview of the stored csi metadata") + } + + log.Info("The current overview of the csi metadata", "overview", overview) +} diff --git a/pkg/controllers/csi/metadata/metadata_test.go b/pkg/controllers/csi/metadata/metadata_test.go new file mode 100644 index 0000000000..50add01477 --- /dev/null +++ b/pkg/controllers/csi/metadata/metadata_test.go @@ -0,0 +1,79 @@ +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +const ( + testName = "test-name" + testID = "test-id" + testUUID = "test-uuid" + testVersion = "test-version" + testDigest = "test-digest" + testMaxFailedMountAttempts = 3 + testMountAttempts = 1 +) + +func TestNewDynakube(t *testing.T) { + t.Run("initializes correctly", func(t *testing.T) { + dynakube := NewDynakube(testName, testUUID, testVersion, testDigest, testMaxFailedMountAttempts) + + assert.Equal(t, testName, dynakube.Name) + assert.Equal(t, testUUID, dynakube.TenantUUID) + assert.Equal(t, testVersion, dynakube.LatestVersion) + assert.Equal(t, testDigest, dynakube.ImageDigest) + assert.Equal(t, testMaxFailedMountAttempts, dynakube.MaxFailedMountAttempts) + }) + t.Run("returns nil if name or uuid is empty", func(t *testing.T) { + dynakube := NewDynakube("", testUUID, testVersion, testDigest, testMaxFailedMountAttempts) + + assert.Nil(t, dynakube) + + dynakube = NewDynakube(testName, "", testVersion, testDigest, testMaxFailedMountAttempts) + + assert.Nil(t, dynakube) + }) +} + +func TestNewVolume(t *testing.T) { + t.Run("initializes correctly", func(t *testing.T) { + volume := NewVolume(testID, testName, testVersion, testUUID, testMountAttempts) + + assert.Equal(t, testID, volume.VolumeID) + assert.Equal(t, testName, volume.PodName) + assert.Equal(t, testVersion, volume.Version) + assert.Equal(t, testUUID, volume.TenantUUID) + assert.Equal(t, testMountAttempts, volume.MountAttempts) + }) + t.Run("returns nil if id, name, or uuid is unset", func(t *testing.T) { + volume := NewVolume("", testName, testVersion, testUUID, testMountAttempts) + + assert.Nil(t, volume) + + volume = NewVolume(testID, "", testVersion, testUUID, testMountAttempts) + + assert.Nil(t, volume) + + volume = NewVolume(testID, testName, testVersion, "", testMountAttempts) + + assert.Nil(t, volume) + + volume = NewVolume(testID, testName, testVersion, testUUID, 0) + + assert.NotNil(t, volume) + assert.Equal(t, 0, volume.MountAttempts) + }) + t.Run("sets default value for mount attempts if less than 0", func(t *testing.T) { + volume := NewVolume(testID, testName, testVersion, testUUID, -1) + + assert.NotNil(t, volume) + assert.Equal(t, 0, volume.MountAttempts) + + volume = NewVolume(testID, testName, testVersion, testUUID, -2) + + assert.NotNil(t, volume) + assert.Equal(t, 0, volume.MountAttempts) + }) +} diff --git a/pkg/controllers/csi/metadata/migrations.go b/pkg/controllers/csi/metadata/migrations.go deleted file mode 100644 index 2c6b89b71c..0000000000 --- a/pkg/controllers/csi/metadata/migrations.go +++ /dev/null @@ -1,192 +0,0 @@ -package metadata - -import ( - "time" - - 
dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" - "gorm.io/gorm" -) - -// Dynakube stores the necessary info from the Dynakube that is needed to be used during volume mount/unmount. -type Dynakube struct { - Name string `json:"name"` - TenantUUID string `json:"tenantUUID"` - LatestVersion string `json:"latestVersion"` - ImageDigest string `json:"imageDigest"` - MaxFailedMountAttempts int `json:"maxFailedMountAttempts"` -} - -type Volume struct { - VolumeID string `json:"volumeID" gorm:"column:ID"` - PodName string `json:"podName"` - Version string `json:"version"` - TenantUUID string `json:"tenantUUID"` - MountAttempts int `json:"mountAttempts"` -} - -type OsAgentVolume struct { - LastModified *time.Time `json:"lastModified"` - VolumeID string `json:"volumeID"` - TenantUUID string `json:"tenantUUID"` - Mounted bool `json:"mounted"` -} - -func migrateDynakubes(tx *gorm.DB) error { - var dynakubes []Dynakube - - tx.Table("dynakubes").Find(&dynakubes) - - pr := PathResolver{RootDir: dtcsi.DataPath} - - for _, d := range dynakubes { - var version string - if d.LatestVersion != "" { - version = d.LatestVersion - } else if d.ImageDigest != "" { - version = d.ImageDigest - } - - tc := TenantConfig{ - Name: d.Name, - TenantUUID: d.TenantUUID, - ConfigDirPath: pr.AgentConfigDir(d.TenantUUID, d.Name), - DownloadedCodeModuleVersion: version, - MaxFailedMountAttempts: int64(d.MaxFailedMountAttempts), - } - - result := tx.Create(&tc) - if result.Error != nil { - log.Error(result.Error, "failed to create TenantConfig") - } - - cm := CodeModule{ - Version: version, - Location: pr.AgentSharedBinaryDirForAgent(version), - } - tx.Create(&cm) - - if result.Error != nil { - log.Error(result.Error, "failed to create CodeModule") - } - } - - return nil -} - -func migrateVolumes(tx *gorm.DB) error { - // Old `Volumes` tables is where we store the information about the mounted volumes that are used - // for Application monitoring (codemodules) - // the reason the names is so generic is that originally that was the only kind of volume - // - // New `AppMount` table is where we store information that is ONLY relevant for volumes that - // are for Application monitoring. - var volumes []Volume - - pr := PathResolver{RootDir: dtcsi.DataPath} - - tx.Table("volumes").Find(&volumes) - - for _, v := range volumes { - vm := VolumeMeta{ - ID: v.VolumeID, - PodUid: "", - PodName: v.PodName, - PodNamespace: "", - } - - result := tx.Create(&vm) - if result.Error != nil { - log.Error(result.Error, "failed to create VolumeMeta") - } - - am := AppMount{ - CodeModuleVersion: v.Version, - VolumeMetaID: vm.ID, - Location: pr.AgentRunDirForVolume(v.TenantUUID, vm.ID), - MountAttempts: int64(v.MountAttempts), - } - - result = tx.Create(&am) - if result.Error != nil { - log.Error(result.Error, "failed to create AppMount") - } - } - - return nil -} - -func migrateOsAgentVolumes(tx *gorm.DB) error { - // Old `OsAgentVolume` table is where we store the information about the mounted volumes that - // are used for the OsAgent this was just bolted on, - // because they need to be handled differently (and was not properly finished) - // - // New `OsMount` table is where we store information that is ONLY relevant for volumes that are - // for the OsAgent. 
- var osAgentVolumnes []OsAgentVolume - - pr := PathResolver{RootDir: dtcsi.DataPath} - - tx.Table("osagent_volumes").Find(&osAgentVolumnes) - - for _, ov := range osAgentVolumnes { - if !ov.Mounted { - continue - } - - vm := VolumeMeta{ - ID: ov.VolumeID, - } - - result := tx.Create(&vm) - if result.Error != nil { - log.Error(result.Error, "failed to create VolumeMeta") - } - - var mountAttempts int64 - if ov.Mounted { - mountAttempts = 1 - } - - // This is a workaround for not having enough information in the current database tables to migrate 100% correctly. - // This is fine as we don't currently use this information for anything. - tc := TenantConfig{TenantUUID: ov.TenantUUID} - tx.First(&tc) - - om := OSMount{ - TenantConfigUID: tc.UID, - TenantUUID: ov.TenantUUID, - VolumeMetaID: vm.ID, - Location: pr.AgentRunDirForVolume(ov.TenantUUID, vm.ID), - MountAttempts: mountAttempts, - } - result = tx.Create(&om) - - if result.Error != nil { - log.Error(result.Error, "failed to create OSMount") - } - } - - return nil -} - -func dataMigration(tx *gorm.DB) error { - if err := migrateDynakubes(tx); err != nil { - return err - } - - if err := migrateVolumes(tx); err != nil { - return err - } - - return migrateOsAgentVolumes(tx) -} - -func removeOldTables(tx *gorm.DB) error { - for _, table := range []string{"dynakubes", "volumes", "osagent_volumes"} { - if err := tx.Migrator().DropTable(table); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/controllers/csi/metadata/models.go b/pkg/controllers/csi/metadata/models.go deleted file mode 100644 index 7ddfc4c472..0000000000 --- a/pkg/controllers/csi/metadata/models.go +++ /dev/null @@ -1,71 +0,0 @@ -package metadata - -import ( - "time" - - "github.com/google/uuid" - "gorm.io/gorm" -) - -type TimeStampedModel struct { - CreatedAt time.Time `json:"createdAt"` - UpdatedAt time.Time `json:"updatedAt"` - DeletedAt gorm.DeletedAt `gorm:"index" json:"deletedAt,omitempty"` -} - -// TenantConfig holds info about a given configuration for a tenant. -type TenantConfig struct { - TimeStampedModel - UID string `gorm:"primaryKey"` // auto generated UID - Name string `gorm:"not null"` - DownloadedCodeModuleVersion string // can't be foreign key because of HostMonitoring edge case - ConfigDirPath string `gorm:"not null"` - TenantUUID string `gorm:"not null"` - MaxFailedMountAttempts int64 `gorm:"default:10"` -} - -func (tc *TenantConfig) BeforeCreate(_ *gorm.DB) error { - tc.UID = uuid.NewString() - - return nil -} - -// CodeModule holds what codemodules we have downloaded and available. -type CodeModule struct { - Version string `gorm:"primaryKey"` - Location string `gorm:"not null"` - TimeStampedModel -} - -// OSMount keeps track of our mounts to OS oneAgents, can be "remounted", which causes annoyances. -type OSMount struct { - VolumeMeta VolumeMeta `gorm:"foreignKey:VolumeMetaID"` - TimeStampedModel - TenantConfigUID string - TenantUUID string `gorm:"primaryKey"` - VolumeMetaID string `gorm:"not null"` - Location string `gorm:"not null"` - TenantConfig TenantConfig `gorm:"foreignKey:TenantConfigUID"` - MountAttempts int64 `gorm:"not null"` -} - -// AppMount keeps track of our mounts to user applications, where we provide the codemodules. 
-type AppMount struct { - VolumeMeta VolumeMeta - CodeModule CodeModule `gorm:"foreignKey:CodeModuleVersion;constraint:OnUpdate:CASCADE,OnDelete:SET NULL;"` - TimeStampedModel - VolumeMetaID string `gorm:"primaryKey"` - CodeModuleVersion string - Location string `gorm:"not null"` - MountAttempts int64 `gorm:"not null"` -} - -// VolumeMeta keeps metadata we get from kubernetes about the volume. -type VolumeMeta struct { - ID string `gorm:"primaryKey"` - PodUid string `gorm:"not null"` - PodName string `gorm:"not null"` - PodNamespace string `gorm:"not null"` - PodServiceAccount string `gorm:"not null"` - TimeStampedModel -} diff --git a/pkg/controllers/csi/metadata/path_resolver.go b/pkg/controllers/csi/metadata/path_resolver.go index a9257bfb96..da9d9e9488 100644 --- a/pkg/controllers/csi/metadata/path_resolver.go +++ b/pkg/controllers/csi/metadata/path_resolver.go @@ -40,8 +40,8 @@ func (pr PathResolver) AgentTempUnzipDir() string { return filepath.Join(pr.AgentTempUnzipRootDir(), "opt", "dynatrace", "oneagent") } -func (pr PathResolver) AgentSharedBinaryDirForAgent(versionOrImageURI string) string { - return filepath.Join(pr.AgentSharedBinaryDirBase(), versionOrImageURI) +func (pr PathResolver) AgentSharedBinaryDirForAgent(versionOrDigest string) string { + return filepath.Join(pr.AgentSharedBinaryDirBase(), versionOrDigest) } func (pr PathResolver) AgentConfigDir(tenantUUID string, dynakubeName string) string { diff --git a/pkg/controllers/csi/metadata/sqlite.go b/pkg/controllers/csi/metadata/sqlite.go new file mode 100644 index 0000000000..1a75808c1e --- /dev/null +++ b/pkg/controllers/csi/metadata/sqlite.go @@ -0,0 +1,814 @@ +package metadata + +import ( + "context" + "database/sql" + "strconv" + "strings" + "time" + + dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta1/dynakube" + "github.com/mattn/go-sqlite3" + "github.com/pkg/errors" +) + +var ( + dynakubesAlterStatementMaxFailedMountAttempts = ` + ALTER TABLE dynakubes + ADD COLUMN MaxFailedMountAttempts INT NOT NULL DEFAULT ` + strconv.FormatInt(dynatracev1beta2.DefaultMaxFailedCsiMountAttempts, 10) + ";" + // "Not null"-columns need a default value set +) + +const ( + sqliteDriverName = "sqlite3" + + // CREATE + dynakubesTableName = "dynakubes" + dynakubesCreateStatement = ` + CREATE TABLE IF NOT EXISTS dynakubes ( + Name VARCHAR NOT NULL, + TenantUUID VARCHAR NOT NULL, + LatestVersion VARCHAR NOT NULL, + PRIMARY KEY (Name) + ); ` + + volumesTableName = "volumes" + volumesCreateStatement = ` + CREATE TABLE IF NOT EXISTS volumes ( + ID VARCHAR NOT NULL, + PodName VARCHAR NOT NULL, + Version VARCHAR NOT NULL, + TenantUUID VARCHAR NOT NULL, + PRIMARY KEY (ID) + );` + + osAgentVolumesTableName = "osagent_volumes" + osAgentVolumesCreateStatement = ` + CREATE TABLE IF NOT EXISTS osagent_volumes ( + TenantUUID VARCHAR NOT NULL, + VolumeID VARCHAR NOT NULL, + Mounted BOOLEAN NOT NULL, + LastModified DATETIME NOT NULL, + PRIMARY KEY (TenantUUID) + );` + + // ALTER + dynakubesAlterStatementImageDigestColumn = ` + ALTER TABLE dynakubes + ADD COLUMN ImageDigest VARCHAR NOT NULL DEFAULT ''; + ` + + volumesAlterStatementMountAttempts = ` + ALTER TABLE volumes + ADD COLUMN MountAttempts INT NOT NULL DEFAULT 0;` + + // INSERT + insertDynakubeStatement = ` + INSERT INTO dynakubes (Name, TenantUUID, LatestVersion, ImageDigest, MaxFailedMountAttempts) + VALUES (?,?,?,?, ?); + ` + + insertVolumeStatement = ` + INSERT INTO volumes (ID, PodName, Version, TenantUUID, MountAttempts) + VALUES (?,?,?,?,?) 
+ ON CONFLICT(ID) DO UPDATE SET + PodName=excluded.PodName, + Version=excluded.Version, + TenantUUID=excluded.TenantUUID, + MountAttempts=excluded.MountAttempts; + ` + + insertOsAgentVolumeStatement = ` + INSERT INTO osagent_volumes (TenantUUID, VolumeID, Mounted, LastModified) + VALUES (?,?,?,?); + ` + + // UPDATE + updateDynakubeStatement = ` + UPDATE dynakubes + SET LatestVersion = ?, TenantUUID = ?, ImageDigest = ?, MaxFailedMountAttempts = ? + WHERE Name = ?; + ` + + updateOsAgentVolumeStatement = ` + UPDATE osagent_volumes + SET VolumeID = ?, Mounted = ?, LastModified = ? + WHERE TenantUUID = ?; + ` + + // GET + getDynakubeStatement = ` + SELECT TenantUUID, LatestVersion, ImageDigest, MaxFailedMountAttempts + FROM dynakubes + WHERE Name = ?; + ` + + getVolumeStatement = ` + SELECT PodName, Version, TenantUUID, MountAttempts + FROM volumes + WHERE ID = ?; + ` + + getOsAgentVolumeViaVolumeIDStatement = ` + SELECT TenantUUID, Mounted, LastModified + FROM osagent_volumes + WHERE VolumeID = ?; + ` + + getOsAgentVolumeViaTenantUUIDStatement = ` + SELECT VolumeID, Mounted, LastModified + FROM osagent_volumes + WHERE TenantUUID = ?; + ` + + // GET ALL + getAllDynakubesStatement = ` + SELECT Name, TenantUUID, LatestVersion, ImageDigest, MaxFailedMountAttempts + FROM dynakubes; + ` + + getAllVolumesStatement = ` + SELECT ID, PodName, Version, TenantUUID, MountAttempts + FROM volumes; + ` + + getAllOsAgentVolumes = ` + SELECT TenantUUID, VolumeID, Mounted, LastModified + FROM osagent_volumes; + ` + + // DELETE + deleteVolumeStatement = "DELETE FROM volumes WHERE ID = ?;" + + deleteDynakubeStatement = "DELETE FROM dynakubes WHERE Name = ?;" + + deleteAppMountStatement = "DELETE FROM app_mounts WHERE volume_meta_id = ?;" + + // SPECIAL + getUsedVersionsStatement = ` + SELECT DISTINCT Version + FROM volumes + WHERE TenantUUID = ?; + ` + + getAllUsedVersionsStatement = ` + SELECT DISTINCT Version + FROM volumes; + ` + + getUsedImageDigestStatement = ` + SELECT DISTINCT ImageDigest + FROM dynakubes + WHERE ImageDigest != ""; + ` + + getLatestVersionsStatement = ` + SELECT DISTINCT LatestVersion + FROM dynakubes; + ` + + getPodNamesStatement = ` + SELECT ID, PodName + FROM volumes; + ` + + getTenantsToDynakubesStatement = ` + SELECT tenantUUID, Name + FROM dynakubes; + ` + + countImageDigestStatement = ` + SELECT COUNT(*) + FROM dynakubes + WHERE ImageDigest = ?; + ` + + getAllAppMountsStatement = ` + SELECT volume_meta_id, code_module_version, location, mount_attempts, pod_name + FROM app_mounts + INNER JOIN volume_meta + WHERE volume_meta.id = app_mounts.volume_meta_id AND app_mounts.deleted_at IS NULL; + ` +) + +type SqliteAccess struct { + conn *sql.DB +} + +// NewAccess creates a new SqliteAccess, connects to the database. 
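As an aside to the access layer restored here: insertVolumeStatement above is an upsert (ON CONFLICT(ID) DO UPDATE), and NewAccess below both connects and creates the tables. A minimal sketch of the intended call sequence against an in-memory database, assuming only the exported API shown in this file; the IDs and versions are made up, and the sqlite3 driver is registered through this package's own import of mattn/go-sqlite3:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata"
)

func main() {
	ctx := context.Background()

	// NewAccess connects to the given path and creates the tables if needed.
	access, err := metadata.NewAccess(ctx, ":memory:")
	if err != nil {
		log.Fatal(err)
	}

	// Re-inserting the same VolumeID updates the row rather than failing,
	// because of the ON CONFLICT(ID) clause above.
	vol := metadata.NewVolume("csi-volume-id", "my-pod", "1.290.0", "tenant-uuid", 0)
	if err := access.InsertVolume(ctx, vol); err != nil {
		log.Fatal(err)
	}

	stored, err := access.GetVolume(ctx, "csi-volume-id")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stored.PodName, stored.Version)
}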
+func NewAccess(ctx context.Context, path string) (Access, error) { + access := SqliteAccess{} + + err := access.Setup(ctx, path) + if err != nil { + log.Error(err, "failed to connect to the database") + + return nil, err + } + + return &access, nil +} + +func (access *SqliteAccess) connect(driver, path string) error { + db, err := sql.Open(driver, path) + if err != nil { + err := errors.WithStack(errors.WithMessagef(err, "couldn't connect to db %s", path)) + access.conn = nil + + return err + } + + access.conn = db + + return nil +} + +func (access *SqliteAccess) createTables(ctx context.Context) error { + err := access.setupDynakubeTable(ctx) + if err != nil { + return err + } + + err = access.setupVolumeTable(ctx) + if err != nil { + return err + } + + if _, err := access.conn.ExecContext(ctx, osAgentVolumesCreateStatement); err != nil { + return errors.WithStack(errors.WithMessagef(err, "couldn't create the table %s", osAgentVolumesTableName)) + } + + return nil +} + +func (access *SqliteAccess) setupVolumeTable(ctx context.Context) error { + _, err := access.conn.Exec(volumesCreateStatement) + if err != nil { + return errors.WithMessagef(err, "couldn't create the table %s", volumesTableName) + } + + err = access.executeAlterStatement(ctx, volumesAlterStatementMountAttempts) + if err != nil { + return err + } + + return nil +} + +// setupDynakubeTable creates the dynakubes table if it doesn't exist and tries to add additional columns +func (access *SqliteAccess) setupDynakubeTable(ctx context.Context) error { + if _, err := access.conn.Exec(dynakubesCreateStatement); err != nil { + return errors.WithStack(errors.WithMessagef(err, "couldn't create the table %s", dynakubesTableName)) + } + + err := access.executeAlterStatement(ctx, dynakubesAlterStatementImageDigestColumn) + if err != nil { + return err + } + + err = access.executeAlterStatement(ctx, dynakubesAlterStatementMaxFailedMountAttempts) + if err != nil { + return err + } + + return nil +} + +func (access *SqliteAccess) executeAlterStatement(ctx context.Context, statement string) error { + if _, err := access.conn.ExecContext(ctx, statement); err != nil { + sqliteErr := sqlite3.Error{} + isSqliteErr := errors.As(err, &sqliteErr) + + if isSqliteErr && sqliteErr.Code != sqlite3.ErrError { + return errors.WithStack(err) + } + } + + return nil +} + +// Setup connects to the database and creates the necessary tables if they don't exist +func (access *SqliteAccess) Setup(ctx context.Context, path string) error { + if err := access.connect(sqliteDriverName, path); err != nil { + return err + } + + if err := access.createTables(ctx); err != nil { + return err + } + + return nil +} + +// InsertDynakube inserts a new Dynakube +func (access *SqliteAccess) InsertDynakube(ctx context.Context, dynakube *Dynakube) error { + err := access.executeStatement(ctx, insertDynakubeStatement, dynakube.Name, dynakube.TenantUUID, dynakube.LatestVersion, dynakube.ImageDigest, dynakube.MaxFailedMountAttempts) + if err != nil { + err = errors.WithMessagef(err, "couldn't insert dynakube entry, tenantUUID '%s', latest version '%s', name '%s', image digest '%s'", + dynakube.TenantUUID, + dynakube.LatestVersion, + dynakube.Name, + dynakube.ImageDigest) + } + + return err +} + +// UpdateDynakube updates an existing Dynakube by matching the name +func (access *SqliteAccess) UpdateDynakube(ctx context.Context, dynakube *Dynakube) error { + err := access.executeStatement(ctx, updateDynakubeStatement, dynakube.LatestVersion, dynakube.TenantUUID, 
dynakube.ImageDigest, dynakube.MaxFailedMountAttempts, dynakube.Name) + if err != nil { + err = errors.WithMessagef(err, "couldn't update dynakube, tenantUUID '%s', latest version '%s', name '%s', image digest '%s'", + dynakube.TenantUUID, + dynakube.LatestVersion, + dynakube.Name, + dynakube.ImageDigest) + } + + return err +} + +// DeleteDynakube deletes an existing Dynakube using its name +func (access *SqliteAccess) DeleteDynakube(ctx context.Context, dynakubeName string) error { + err := access.executeStatement(ctx, deleteDynakubeStatement, dynakubeName) + if err != nil { + err = errors.WithMessagef(err, "couldn't delete dynakube, name '%s'", dynakubeName) + } + + return err +} + +// GetDynakube gets Dynakube using its name +func (access *SqliteAccess) GetDynakube(ctx context.Context, dynakubeName string) (*Dynakube, error) { + var tenantUUID string + + var latestVersion string + + var imageDigest string + + var maxFailedMountAttempts int + + err := access.querySimpleStatement(ctx, getDynakubeStatement, dynakubeName, &tenantUUID, &latestVersion, &imageDigest, &maxFailedMountAttempts) + if err != nil { + err = errors.WithMessagef(err, "couldn't get dynakube, name '%s'", dynakubeName) + } + + return NewDynakube(dynakubeName, tenantUUID, latestVersion, imageDigest, maxFailedMountAttempts), err +} + +// InsertVolume inserts a new Volume +func (access *SqliteAccess) InsertVolume(ctx context.Context, volume *Volume) error { + err := access.executeStatement(ctx, insertVolumeStatement, volume.VolumeID, volume.PodName, volume.Version, volume.TenantUUID, volume.MountAttempts) + if err != nil { + err = errors.WithMessagef(err, "couldn't insert volume info, volume id '%s', pod '%s', version '%s', dynakube '%s'", + volume.VolumeID, + volume.PodName, + volume.Version, + volume.TenantUUID) + } + + return err +} + +// GetVolume gets Volume by its ID +func (access *SqliteAccess) GetVolume(ctx context.Context, volumeID string) (*Volume, error) { + var podName string + + var version string + + var tenantUUID string + + var mountAttempts int + + err := access.querySimpleStatement(ctx, getVolumeStatement, volumeID, &podName, &version, &tenantUUID, &mountAttempts) + if err != nil { + err = errors.WithMessagef(err, "couldn't get volume field for volume id '%s'", volumeID) + } + + return NewVolume(volumeID, podName, version, tenantUUID, mountAttempts), err +} + +// DeleteVolume deletes a Volume by its ID +func (access *SqliteAccess) DeleteVolume(ctx context.Context, volumeID string) error { + err := access.executeStatement(ctx, deleteVolumeStatement, volumeID) + if err != nil { + err = errors.WithMessagef(err, "couldn't delete volume for volume id '%s'", volumeID) + } + + return err +} + +// InsertOsAgentVolume inserts a new OsAgentVolume +func (access *SqliteAccess) InsertOsAgentVolume(ctx context.Context, volume *OsAgentVolume) error { + err := access.executeStatement(ctx, insertOsAgentVolumeStatement, volume.TenantUUID, volume.VolumeID, volume.Mounted, volume.LastModified) + if err != nil { + err = errors.WithMessagef(err, "couldn't insert osAgentVolume info, volume id '%s', tenant UUID '%s', mounted '%t', last modified '%s'", + volume.VolumeID, + volume.TenantUUID, + volume.Mounted, + volume.LastModified) + } + + return err +} + +// UpdateOsAgentVolume updates an existing OsAgentVolume by matching the tenantUUID +func (access *SqliteAccess) UpdateOsAgentVolume(ctx context.Context, volume *OsAgentVolume) error { + err := access.executeStatement(ctx, updateOsAgentVolumeStatement, volume.VolumeID, 
volume.Mounted, volume.LastModified, volume.TenantUUID) + if err != nil { + err = errors.WithMessagef(err, "couldn't update osAgentVolume info, tenantUUID '%s', mounted '%t', last modified '%s', volume id '%s'", + volume.TenantUUID, + volume.Mounted, + volume.LastModified, + volume.VolumeID) + } + + return err +} + +// GetOsAgentVolumeViaVolumeID gets an OsAgentVolume by its VolumeID +func (access *SqliteAccess) GetOsAgentVolumeViaVolumeID(ctx context.Context, volumeID string) (*OsAgentVolume, error) { + var tenantUUID string + + var mounted bool + + var lastModified time.Time + + err := access.querySimpleStatement(ctx, getOsAgentVolumeViaVolumeIDStatement, volumeID, &tenantUUID, &mounted, &lastModified) + if err != nil { + err = errors.WithMessagef(err, "couldn't get osAgentVolume info for volume id '%s'", volumeID) + } + + return NewOsAgentVolume(volumeID, tenantUUID, mounted, &lastModified), err +} + +// GetOsAgentVolumeViaTenantUUID gets an OsAgentVolume by its tenantUUID +func (access *SqliteAccess) GetOsAgentVolumeViaTenantUUID(ctx context.Context, tenantUUID string) (*OsAgentVolume, error) { + var volumeID string + + var mounted bool + + var lastModified time.Time + + err := access.querySimpleStatement(ctx, getOsAgentVolumeViaTenantUUIDStatement, tenantUUID, &volumeID, &mounted, &lastModified) + if err != nil { + err = errors.WithMessagef(err, "couldn't get osAgentVolume info for tenant uuid '%s'", tenantUUID) + } + + return NewOsAgentVolume(volumeID, tenantUUID, mounted, &lastModified), err +} + +// GetAllVolumes gets all the Volumes from the database +func (access *SqliteAccess) GetAllVolumes(ctx context.Context) ([]*Volume, error) { + rows, err := access.conn.QueryContext(ctx, getAllVolumesStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't get all the volumes")) + } + + volumes := []*Volume{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var id string + + var podName string + + var version string + + var tenantUUID string + + var mountAttempts int + + err := rows.Scan(&id, &podName, &version, &tenantUUID, &mountAttempts) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't scan volume from database")) + } + + volumes = append(volumes, NewVolume(id, podName, version, tenantUUID, mountAttempts)) + } + + return volumes, nil +} + +// GetAllDynakubes gets all the Dynakubes from the database +func (access *SqliteAccess) GetAllDynakubes(ctx context.Context) ([]*Dynakube, error) { + rows, err := access.conn.QueryContext(ctx, getAllDynakubesStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't get all the dynakubes")) + } + + dynakubes := []*Dynakube{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var name string + + var version string + + var tenantUUID string + + var imageDigest string + + var maxFailedMountAttempts int + + err := rows.Scan(&name, &tenantUUID, &version, &imageDigest, &maxFailedMountAttempts) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't scan dynakube from database")) + } + + dynakubes = append(dynakubes, NewDynakube(name, tenantUUID, version, imageDigest, maxFailedMountAttempts)) + } + + return dynakubes, nil +} + +// GetAllOsAgentVolumes gets all the OsAgentVolume from the database +func (access *SqliteAccess) GetAllOsAgentVolumes(ctx context.Context) ([]*OsAgentVolume, error) { + rows, err := access.conn.QueryContext(ctx, getAllOsAgentVolumes) + if err != nil { + return nil, 
errors.WithStack(errors.WithMessage(err, "couldn't get all the osagent volumes")) + } + + osVolumes := []*OsAgentVolume{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var volumeID string + + var tenantUUID string + + var mounted bool + + var timeStamp time.Time + + err := rows.Scan(&tenantUUID, &volumeID, &mounted, &timeStamp) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't scan osagent volume from database")) + } + + osVolumes = append(osVolumes, NewOsAgentVolume(volumeID, tenantUUID, mounted, &timeStamp)) + } + + return osVolumes, nil +} + +// GetUsedVersions gets all UNIQUE versions present in the `volumes` for a given tenantUUID database in map. +// Map is used to make sure we don't return the same version multiple time, +// it's also easier to check if a version is in it or not. (a Set in style of Golang) +func (access *SqliteAccess) GetUsedVersions(ctx context.Context, tenantUUID string) (map[string]bool, error) { + rows, err := access.conn.QueryContext(ctx, getUsedVersionsStatement, tenantUUID) + if err != nil { + return nil, errors.WithStack(errors.WithMessagef(err, "couldn't get used version info for tenant uuid '%s'", tenantUUID)) + } + + versions := map[string]bool{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var version string + + err := rows.Scan(&version) + if err != nil { + return nil, errors.WithStack(errors.WithMessagef(err, "couldn't scan used version info for tenant uuid '%s'", tenantUUID)) + } + + versions[version] = true + } + + return versions, nil +} + +// GetUsedVersions gets all UNIQUE versions present in the `volumes` database in map. +// Map is used to make sure we don't return the same version multiple time, +// it's also easier to check if a version is in it or not. (a Set in style of Golang) +func (access *SqliteAccess) GetAllUsedVersions(ctx context.Context) (map[string]bool, error) { + rows, err := access.conn.QueryContext(ctx, getAllUsedVersionsStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessagef(err, "couldn't get all used version info")) + } + + versions := map[string]bool{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var version string + + err := rows.Scan(&version) + if err != nil { + return nil, errors.WithStack(errors.WithMessagef(err, "couldn't scan used version info")) + } + + if _, ok := versions[version]; !ok { + versions[version] = true + } + } + + return versions, nil +} + +// GetLatestVersions gets all UNIQUE latestVersions present in the `dynakubes` database in map. +// Map is used to make sure we don't return the same version multiple time, +// it's also easier to check if a version is in it or not. (a Set in style of Golang) +func (access *SqliteAccess) GetLatestVersions(ctx context.Context) (map[string]bool, error) { + rows, err := access.conn.QueryContext(ctx, getLatestVersionsStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't get all the latests version info for tenant uuid")) + } + + versions := map[string]bool{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var version string + + err := rows.Scan(&version) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't scan latest version info ")) + } + + versions[version] = true + } + + return versions, nil +} + +// GetUsedImageDigests gets all UNIQUE image digests present in the `dynakubes` database in a map. 
+// Map is used to make sure we don't return the same digest multiple time, +// it's also easier to check if a digest is in it or not. (a Set in style of Golang) +func (access *SqliteAccess) GetUsedImageDigests(ctx context.Context) (map[string]bool, error) { + rows, err := access.conn.QueryContext(ctx, getUsedImageDigestStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't get used image digests from database")) + } + + imageDigests := map[string]bool{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var digest string + + err := rows.Scan(&digest) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "failed to scan from image digests database")) + } + + if _, ok := imageDigests[digest]; !ok { + imageDigests[digest] = true + } + } + + return imageDigests, nil +} + +// IsImageDigestUsed checks if the specified image digest is present in the database. +func (access *SqliteAccess) IsImageDigestUsed(ctx context.Context, imageDigest string) (bool, error) { + var count int + + err := access.querySimpleStatement(ctx, countImageDigestStatement, imageDigest, &count) + if err != nil { + return false, errors.WithMessagef(err, "couldn't count usage of image digest: %s", imageDigest) + } + + return count > 0, nil +} + +// GetPodNames gets all PodNames present in the `volumes` database in map with their corresponding volumeIDs. +func (access *SqliteAccess) GetPodNames(ctx context.Context) (map[string]string, error) { + rows, err := access.conn.QueryContext(ctx, getPodNamesStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't get all pod names")) + } + + podNames := map[string]string{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var podName string + + var volumeID string + + err := rows.Scan(&volumeID, &podName) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't scan pod name from database")) + } + + podNames[podName] = volumeID + } + + return podNames, nil +} + +// GetTenantsToDynakubes gets all Dynakubes and maps their name to the corresponding TenantUUID. 
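As an aside to the lookup helpers in this file: the returned maps have orientations that are easy to misread from the method names alone. A small sketch of their shapes, derived from the scan loops shown here; all literal values are made up:

package main

import "fmt"

func main() {
	// Shape of GetTenantsToDynakubes' result: keyed by Dynakube name,
	// value is the tenant UUID (the scan loop assigns dynakubes[dynakube] = uuid).
	tenantsToDynakubes := map[string]string{
		"my-dynakube": "tenant-uuid",
	}

	// Shape of GetPodNames' result: keyed by pod name, value is the volume ID.
	podNames := map[string]string{
		"my-pod": "csi-volume-id",
	}

	// Shape of GetUsedVersions / GetUsedImageDigests: map[string]bool used as a set.
	usedVersions := map[string]bool{"1.290.0": true}

	fmt.Println(tenantsToDynakubes["my-dynakube"]) // tenant-uuid
	fmt.Println(podNames["my-pod"])                // csi-volume-id
	fmt.Println(usedVersions["0.0.0"])             // false -> not in use
}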
+func (access *SqliteAccess) GetTenantsToDynakubes(ctx context.Context) (map[string]string, error) { + rows, err := access.conn.QueryContext(ctx, getTenantsToDynakubesStatement) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't get all tenants to dynakube metadata")) + } + + dynakubes := map[string]string{} + + defer func() { _ = rows.Close() }() + + for rows.Next() { + var uuid string + + var dynakube string + + err := rows.Scan(&uuid, &dynakube) + if err != nil { + return nil, errors.WithStack(errors.WithMessage(err, "couldn't scan tenant to dynakube metadata from database")) + } + + dynakubes[dynakube] = uuid + } + + return dynakubes, nil +} + +func (access *SqliteAccess) GetAllAppMounts(ctx context.Context) []*Volume { + rows, err := access.conn.QueryContext(ctx, getAllAppMountsStatement) + if err != nil { + log.Info("skipping migration due to error getting all app mounts", "error", err) + + return nil + } + + defer func() { _ = rows.Close() }() + + var volumes = make([]*Volume, 0) + + for rows.Next() { + var code_module_version, volume_meta_id, location, pod_name string + + var mount_attempts int + + err := rows.Scan(&volume_meta_id, &code_module_version, &location, &mount_attempts, &pod_name) + if err != nil { + log.Info("couldn't scan app_mount from database", "error", err) + + continue + } + + tenantUUID := getTenantUUIDFromLocation(location) + if tenantUUID == "" { + log.Info("could not parse tenantUUID from location", "location", location) + + continue + } + + volumes = append(volumes, NewVolume(volume_meta_id, pod_name, code_module_version, tenantUUID, mount_attempts)) + } + + return volumes +} + +func (access *SqliteAccess) DeleteAppMount(ctx context.Context, appMountID string) error { + err := access.executeStatement(ctx, deleteAppMountStatement, appMountID) + if err != nil { + return err + } + + return nil +} + +// Executes the provided SQL statement on the database. +// The `vars` are passed to the SQL statement (in-order), to fill in the SQL wildcards. +func (access *SqliteAccess) executeStatement(ctx context.Context, statement string, vars ...any) error { + _, err := access.conn.ExecContext(ctx, statement, vars...) + + return errors.WithStack(err) +} + +// Executes the provided SQL SELECT statement on the database. +// The SQL statement should always return a single row. +// The `id` is passed to the SQL query to fill in an SQL wildcard +// The `vars` are filled with the values of the return of the SELECT statement, so the `vars` need to be pointers. +func (access *SqliteAccess) querySimpleStatement(ctx context.Context, statement, id string, vars ...any) error { + row := access.conn.QueryRowContext(ctx, statement, id) + + err := row.Scan(vars...) 
+ if err != nil && err != sql.ErrNoRows { + return errors.WithStack(err) + } + + return nil +} + +func getTenantUUIDFromLocation(location string) string { + var tennantUUIDIndex = 2 + + result := strings.Split(location, "/") + if len(result) > tennantUUIDIndex { + return result[tennantUUIDIndex] + } + + return "" +} diff --git a/pkg/controllers/csi/metadata/sqlite_gorm_client.go b/pkg/controllers/csi/metadata/sqlite_gorm_client.go deleted file mode 100644 index 1c02fba97c..0000000000 --- a/pkg/controllers/csi/metadata/sqlite_gorm_client.go +++ /dev/null @@ -1,470 +0,0 @@ -package metadata - -import ( - "context" - "strings" - - "github.com/go-gormigrate/gormigrate/v2" - "github.com/pkg/errors" - "gorm.io/driver/sqlite" - "gorm.io/gorm" - "gorm.io/gorm/logger" -) - -type Access interface { - SchemaMigration() error - - ReadTenantConfig(tenantConfig TenantConfig) (*TenantConfig, error) - ReadCodeModule(codeModule CodeModule) (*CodeModule, error) - ReadOSMount(osMount OSMount) (*OSMount, error) - ReadUnscopedOSMount(osMount OSMount) (*OSMount, error) - ReadAppMount(appMount AppMount) (*AppMount, error) - - ReadTenantConfigs() ([]TenantConfig, error) - ReadCodeModules() ([]CodeModule, error) - ReadOSMounts() ([]OSMount, error) - ReadAppMounts() ([]AppMount, error) - ReadVolumeMetas() ([]VolumeMeta, error) - - CreateTenantConfig(tenantConfig *TenantConfig) error - CreateCodeModule(codeModule *CodeModule) error - CreateOSMount(osMount *OSMount) error - CreateAppMount(appMount *AppMount) error - - UpdateTenantConfig(tenantConfig *TenantConfig) error - UpdateOSMount(osMount *OSMount) error - UpdateAppMount(appMount *AppMount) error - - DeleteTenantConfig(tenantConfig *TenantConfig, cascade bool) error - DeleteCodeModule(codeModule *CodeModule) error - DeleteOSMount(osMount *OSMount) error - DeleteAppMount(appMount *AppMount) error - - IsCodeModuleOrphaned(codeModule *CodeModule) (bool, error) - RestoreOSMount(osMount *OSMount) (*OSMount, error) -} - -// TODO: Come up with a less confusing name -type AccessCleaner interface { - Access - Cleaner -} - -type GormConn struct { - ctx context.Context - db *gorm.DB -} - -var _ Access = &GormConn{} - -// NewAccess creates a new gorm db connection to the database. 
-func NewAccess(ctx context.Context, path string) (*GormConn, error) { - // we need to explicitly enable foreign_keys for sqlite to have sqlite enforce this constraint - if strings.Contains(path, "?") { - path += "&_foreign_keys=on" - } else { - path += "?_foreign_keys=on" - } - - db, err := gorm.Open(sqlite.Open(path), &gorm.Config{Logger: logger.Discard}) - - if err != nil { - return &GormConn{}, err - } - - return &GormConn{ctx: ctx, db: db}, nil -} - -// SchemaMigration runs gormigrate migrations to create tables -func (conn *GormConn) SchemaMigration() error { - err := conn.InitGormSchema() - if err != nil { - return err - } - - return gormigrate.New(conn.db, gormigrate.DefaultOptions, []*gormigrate.Migration{ - { - ID: "202403041200", - Migrate: dataMigration, - Rollback: func(tx *gorm.DB) error { - return nil - }, - }, - { - ID: "202406061200", - Migrate: removeOldTables, - Rollback: func(tx *gorm.DB) error { - return nil - }, - }, - }).Migrate() -} - -func (conn *GormConn) InitGormSchema() error { - m := gormigrate.New(conn.db, gormigrate.DefaultOptions, []*gormigrate.Migration{}) - m.InitSchema(func(tx *gorm.DB) error { - err := tx.AutoMigrate( - &TenantConfig{}, - &CodeModule{}, - &OSMount{}, - &AppMount{}, - &VolumeMeta{}, - ) - if err != nil { - return err - } - // all other constraints, indexes, etc... - return nil - }) - - _ = m.Migrate() - - return nil -} - -func (conn *GormConn) ReadTenantConfig(tenantConfig TenantConfig) (*TenantConfig, error) { - var record *TenantConfig - - if (tenantConfig == TenantConfig{}) { - return nil, errors.New("Can't query for empty TenantConfig") - } - - result := conn.db.WithContext(conn.ctx).Find(&record, tenantConfig) - if result.Error != nil { - return nil, result.Error - } - - if (*record == TenantConfig{}) { - return nil, gorm.ErrRecordNotFound - } - - return record, nil -} - -func (conn *GormConn) ReadCodeModule(codeModule CodeModule) (*CodeModule, error) { - var record *CodeModule - - if (codeModule == CodeModule{}) { - return nil, errors.New("Can't query for empty CodeModule") - } - - result := conn.db.WithContext(conn.ctx).Find(&record, codeModule) - if result.Error != nil { - return nil, result.Error - } - - if (*record == CodeModule{}) { - return nil, gorm.ErrRecordNotFound - } - - return record, nil -} - -func (conn *GormConn) ReadOSMount(osMount OSMount) (*OSMount, error) { - var record *OSMount - - if (osMount == OSMount{}) { - return nil, errors.New("Can't query for empty OSMount") - } - - result := conn.db.WithContext(conn.ctx).Preload("VolumeMeta").Find(&record, osMount) - if result.Error != nil { - return nil, result.Error - } - - if (*record == OSMount{}) { - return nil, gorm.ErrRecordNotFound - } - - return record, nil -} - -func (conn *GormConn) ReadUnscopedOSMount(osMount OSMount) (*OSMount, error) { - var record *OSMount - - if (osMount == OSMount{}) { - return nil, errors.New("Can't query for empty OSMount") - } - - result := conn.db.WithContext(conn.ctx).Preload("VolumeMeta").Unscoped().Find(&record, osMount) - if result.Error != nil { - return nil, result.Error - } - - if (*record == OSMount{}) { - return nil, gorm.ErrRecordNotFound - } - - return record, nil -} - -func (conn *GormConn) ReadAppMount(appMount AppMount) (*AppMount, error) { - var record *AppMount - - if (appMount == AppMount{}) { - return nil, errors.New("Can't query for empty AppMount") - } - - result := conn.db.WithContext(conn.ctx).Preload("VolumeMeta").Preload("CodeModule").Find(&record, appMount) - if result.Error != nil { - return nil, 
result.Error - } - - if (*record == AppMount{}) { - return nil, gorm.ErrRecordNotFound - } - - return record, nil -} - -func (conn *GormConn) ReadTenantConfigs() ([]TenantConfig, error) { - var tenantConfigs []TenantConfig - - result := conn.db.WithContext(conn.ctx).Find(&tenantConfigs) - if result.Error != nil { - return nil, result.Error - } - - return tenantConfigs, nil -} - -func (conn *GormConn) ReadCodeModules() ([]CodeModule, error) { - var codeModules []CodeModule - - result := conn.db.WithContext(conn.ctx).Find(&codeModules) - if result.Error != nil { - return nil, result.Error - } - - return codeModules, nil -} - -func (conn *GormConn) ReadOSMounts() ([]OSMount, error) { - var osMounts []OSMount - - result := conn.db.WithContext(conn.ctx).Preload("VolumeMeta").Find(&osMounts) - if result.Error != nil { - return nil, result.Error - } - - return osMounts, nil -} - -func (conn *GormConn) ReadAppMounts() ([]AppMount, error) { - var appMounts []AppMount - - result := conn.db.WithContext(conn.ctx).Preload("VolumeMeta").Preload("CodeModule").Find(&appMounts) - if result.Error != nil { - return nil, result.Error - } - - return appMounts, nil -} - -func (conn *GormConn) ReadVolumeMetas() ([]VolumeMeta, error) { - var volumeMetas []VolumeMeta - - result := conn.db.WithContext(conn.ctx).Find(&volumeMetas) - if result.Error != nil { - return nil, result.Error - } - - return volumeMetas, nil -} - -func (conn *GormConn) CreateTenantConfig(tenantConfig *TenantConfig) error { - return conn.db.WithContext(conn.ctx).Create(tenantConfig).Error -} - -func (conn *GormConn) CreateCodeModule(codeModule *CodeModule) error { - return conn.db.WithContext(conn.ctx).Create(codeModule).Error -} - -func (conn *GormConn) CreateOSMount(osMount *OSMount) error { - return conn.db.WithContext(conn.ctx).Create(osMount).Error -} -func (conn *GormConn) CreateAppMount(appMount *AppMount) error { - return conn.db.WithContext(conn.ctx).Create(appMount).Error -} - -func (conn *GormConn) UpdateTenantConfig(tenantConfig *TenantConfig) error { - if (tenantConfig == nil || *tenantConfig == TenantConfig{}) { - return errors.New("Can't save an empty TenantConfig") - } - - return conn.db.WithContext(conn.ctx).Save(tenantConfig).Error -} - -func (conn *GormConn) UpdateOSMount(osMount *OSMount) error { - if (osMount == nil || *osMount == OSMount{}) { - return errors.New("Can't save an empty TenantConfig") - } - - return conn.db.WithContext(conn.ctx).Updates(osMount).Error -} -func (conn *GormConn) UpdateAppMount(appMount *AppMount) error { - if (appMount == nil || *appMount == AppMount{}) { - return errors.New("Can't save an empty AppMount") - } - - return conn.db.WithContext(conn.ctx).Updates(appMount).Error -} - -func (conn *GormConn) DeleteTenantConfig(tenantConfig *TenantConfig, cascade bool) error { - if (tenantConfig == nil || *tenantConfig == TenantConfig{}) { - return nil - } - - tenantConfig, err := conn.ReadTenantConfig(*tenantConfig) - if err != nil && errors.Is(err, gorm.ErrRecordNotFound) { - return nil - } else if err != nil { - return err - } - - err = conn.db.WithContext(conn.ctx).Delete(&TenantConfig{}, &tenantConfig).Error - if err != nil { - return err - } - - if cascade { - orphaned, err := conn.IsCodeModuleOrphaned(&CodeModule{Version: tenantConfig.DownloadedCodeModuleVersion}) - if err != nil { - return err - } - - if orphaned { - err = conn.DeleteCodeModule(&CodeModule{Version: tenantConfig.DownloadedCodeModuleVersion}) - if err != nil { - return err - } - } - } - - return nil -} - -func (conn *GormConn) 
DeleteCodeModule(codeModule *CodeModule) error { - if (codeModule == nil || *codeModule == CodeModule{}) { - return errors.New("Can't delete an empty CodeModule") - } - - return conn.db.WithContext(conn.ctx).Delete(&CodeModule{}, codeModule).Error -} - -func (conn *GormConn) DeleteOSMount(osMount *OSMount) error { - if (osMount == nil || *osMount == OSMount{}) { - return errors.New("Can't delete an empty OSMount") - } - - return conn.db.WithContext(conn.ctx).Delete(&OSMount{}, osMount).Error -} - -func (conn *GormConn) DeleteAppMount(appMount *AppMount) error { - if (appMount == nil || *appMount == AppMount{}) { - return errors.New("Can't delete an empty AppMount") - } - - orphaned, err := conn.IsCodeModuleOrphaned(&CodeModule{Version: appMount.CodeModuleVersion}) - if err != nil { - return err - } - - if orphaned { - err = conn.DeleteCodeModule(&CodeModule{Version: appMount.CodeModuleVersion}) - if err != nil { - return err - } - } - - return conn.db.WithContext(conn.ctx).Delete(&AppMount{}, appMount).Error -} - -func (conn *GormConn) IsCodeModuleOrphaned(codeModule *CodeModule) (bool, error) { - var tenantConfigResults []TenantConfig - - var appMountResults []AppMount - - if (codeModule == nil || *codeModule == CodeModule{}) { - return false, nil - } - - err := conn.db.WithContext(conn.ctx).Find(&tenantConfigResults, &TenantConfig{DownloadedCodeModuleVersion: codeModule.Version}).Error - if err != nil { - return false, err - } - - err = conn.db.WithContext(conn.ctx).Find(&appMountResults, &AppMount{CodeModuleVersion: codeModule.Version}).Error - if err != nil { - return false, err - } - - if len(tenantConfigResults) == 0 && len(appMountResults) == 0 { - return true, nil - } - - return false, nil -} - -func (conn *GormConn) RestoreOSMount(osMount *OSMount) (*OSMount, error) { - osMount.DeletedAt.Valid = false - - err := conn.db.WithContext(conn.ctx).Unscoped().Updates(osMount).Error - if err != nil { - return nil, err - } - - return osMount, nil -} - -type AccessOverview struct { - VolumeMetas []VolumeMeta `json:"volumeMetas"` - AppMounts []AppMount `json:"appMounts"` - TenantConfigs []TenantConfig `json:"tenantConfigs"` - CodeModules []CodeModule `json:"codeModules"` - OSMounts []OSMount `json:"osMounts"` -} - -func NewAccessOverview(access Access) (*AccessOverview, error) { - volumeMetas, err := access.ReadVolumeMetas() - if err != nil { - return nil, err - } - - appMounts, err := access.ReadAppMounts() - if err != nil { - return nil, err - } - - tenantConfigs, err := access.ReadTenantConfigs() - if err != nil { - return nil, err - } - - codeModules, err := access.ReadCodeModules() - if err != nil { - return nil, err - } - - osMounts, err := access.ReadOSMounts() - if err != nil { - return nil, err - } - - return &AccessOverview{ - VolumeMetas: volumeMetas, - AppMounts: appMounts, - TenantConfigs: tenantConfigs, - CodeModules: codeModules, - OSMounts: osMounts, - }, nil -} - -func LogAccessOverview(access Access) { - overview, err := NewAccessOverview(access) - if err != nil { - log.Error(err, "Failed to get an overview of the stored csi metadata") - } - - log.Info("The current overview of the csi metadata", "overview", overview) -} diff --git a/pkg/controllers/csi/metadata/sqlite_gorm_client_test.go b/pkg/controllers/csi/metadata/sqlite_gorm_client_test.go deleted file mode 100644 index 4cb383b772..0000000000 --- a/pkg/controllers/csi/metadata/sqlite_gorm_client_test.go +++ /dev/null @@ -1,695 +0,0 @@ -package metadata - -import ( - "context" - "fmt" - "testing" - - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gorm.io/gorm" -) - -func TestSchemaMigration(t *testing.T) { - t.Run("run migration", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - err = db.SchemaMigration() - require.NoError(t, err) - }) -} - -func TestCreateTenantConfig(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - tenantConfig := &TenantConfig{ - Name: "somename", - ConfigDirPath: "somewhere", - DownloadedCodeModuleVersion: "1.2.3", - TenantUUID: "abc123", - } - - err = db.CreateTenantConfig(tenantConfig) - require.NoError(t, err) - - readTenantConfig := &TenantConfig{TenantUUID: "abc123"} - db.db.WithContext(context.Background()).First(readTenantConfig) - assert.Equal(t, readTenantConfig.UID, tenantConfig.UID) - - err = db.CreateTenantConfig(nil) - require.Error(t, err) -} - -func TestReadTenantConfig(t *testing.T) { - db, err := setupDB() - setupPostReconcileData(db) - - require.NoError(t, err) - - tc, err := db.ReadTenantConfig(TenantConfig{TenantUUID: "abc123"}) - require.NoError(t, err) - - assert.NotNil(t, tc) - assert.Equal(t, "abc123", tc.TenantUUID) - - _, err = db.ReadTenantConfig(TenantConfig{}) - require.Error(t, err) - - _, err = db.ReadTenantConfig(TenantConfig{TenantUUID: "unknown"}) - require.Error(t, err) -} - -func TestUpdateTenantConfig(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostReconcileData(db) - - tenantConfig, err := db.ReadTenantConfig(TenantConfig{TenantUUID: "abc123"}) - require.NoError(t, err) - - tenantConfig.DownloadedCodeModuleVersion = "2.3.4" - err = db.UpdateTenantConfig(tenantConfig) - require.NoError(t, err) - - readTenantConfig := &TenantConfig{TenantUUID: "abc123"} - db.db.WithContext(context.Background()).First(readTenantConfig) - assert.Equal(t, tenantConfig.UID, readTenantConfig.UID) - assert.Equal(t, "2.3.4", readTenantConfig.DownloadedCodeModuleVersion) - - err = db.UpdateTenantConfig(nil) - require.Error(t, err) - - err = db.UpdateTenantConfig(&TenantConfig{}) - require.Error(t, err) -} - -func TestDeleteTenantConfig(t *testing.T) { - var tenantConfig *TenantConfig - - var codeModules []CodeModule - - t.Run("on cascade deletion true", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - db.db.Create(&TenantConfig{ - TenantUUID: "uuid", - DownloadedCodeModuleVersion: "1.0", - }) - - db.db.Create(&CodeModule{ - Version: "1.0", - }) - - db.db.WithContext(context.Background()).Find(&tenantConfig, TenantConfig{TenantUUID: "uuid"}) - - db.DeleteTenantConfig(&TenantConfig{UID: tenantConfig.UID}, true) - - _, err = db.ReadTenantConfig(TenantConfig{UID: tenantConfig.UID}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - - codeModules, err = db.ReadCodeModules() - assert.Empty(t, codeModules) - require.NoError(t, err) - }) - t.Run("on cascade deletion false", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - db.db.Create(&TenantConfig{ - TenantUUID: "uuid", - DownloadedCodeModuleVersion: "1.0", - }) - - db.db.Create(&CodeModule{ - Version: "1.0", - }) - - db.db.WithContext(context.Background()).Find(&tenantConfig, TenantConfig{TenantUUID: "uuid"}) - - db.DeleteTenantConfig(&TenantConfig{UID: tenantConfig.UID}, false) - - _, err = db.ReadTenantConfig(TenantConfig{UID: tenantConfig.UID}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - - codeModules, err = db.ReadCodeModules() - assert.NotEmpty(t, codeModules) - require.NoError(t, err) - }) -} - -func TestCreateCodeModule(t *testing.T) { - 
db, err := setupDB() - require.NoError(t, err) - - codeModule := &CodeModule{ - Version: "1.2.3", - Location: "someplace", - } - err = db.CreateCodeModule(codeModule) - require.NoError(t, err) - - readCodeModule := &CodeModule{Version: "1.2.3"} - db.db.WithContext(context.Background()).First(readCodeModule) - assert.Equal(t, "someplace", readCodeModule.Location) - - err = db.CreateCodeModule(nil) - require.Error(t, err) - - err = db.CreateCodeModule(&CodeModule{ - Version: "1.2.3", - }) - require.Error(t, err) -} - -func TestReadCodeModule(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostReconcileData(db) - - codeModule, err := db.ReadCodeModule(CodeModule{Version: "1.2.3"}) - require.NoError(t, err) - - assert.NotNil(t, codeModule) - assert.Equal(t, "someplace", codeModule.Location) - - _, err = db.ReadCodeModule(CodeModule{Version: ""}) - require.Error(t, err) - - _, err = db.ReadCodeModule(CodeModule{Version: "unknown"}) - require.Error(t, err) -} - -func TestIsCodeModuleOrphaned(t *testing.T) { - t.Run("is not orphaned because of existing TenantConfig", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - tenantConfig := &TenantConfig{ - DownloadedCodeModuleVersion: "1.0", - UID: "1", - } - codeModule := &CodeModule{ - Version: "1.0", - } - - db.db.Create(tenantConfig) - db.db.Create(codeModule) - - got, err := db.IsCodeModuleOrphaned(codeModule) - assert.False(t, got) - assert.NoError(t, err) - }) - - t.Run("is not orphaned because of existing AppMount", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - codeModule := &CodeModule{ - Version: "1.0", - } - appMount := &AppMount{ - CodeModuleVersion: "1.0", - VolumeMetaID: "1", - CodeModule: *codeModule, - VolumeMeta: VolumeMeta{ID: "1"}, - } - db.db.Create(appMount) - - got, err := db.IsCodeModuleOrphaned(codeModule) - assert.False(t, got) - assert.NoError(t, err) - }) - t.Run("is orphaned", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - codeModule := &CodeModule{ - Version: "1.0", - } - db.db.Create(codeModule) - - got, err := db.IsCodeModuleOrphaned(codeModule) - assert.True(t, got) - assert.NoError(t, err) - }) -} - -func TestRestoreOSMount(t *testing.T) { - t.Run("Restore OSMount", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - setupPostPublishData(db) - - err = db.DeleteOSMount(&OSMount{TenantUUID: "abc123"}) - require.NoError(t, err) - - osMount, err := db.ReadOSMount(OSMount{TenantUUID: "abc123"}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - assert.Nil(t, osMount) - - osMount, err = db.ReadUnscopedOSMount(OSMount{TenantUUID: "abc123"}) - require.NoError(t, err) - assert.NotNil(t, osMount) - - osMount, err = db.RestoreOSMount(osMount) - require.NoError(t, err) - assert.NotNil(t, osMount) - - osMount, err = db.ReadOSMount(OSMount{TenantUUID: "abc123"}) - require.NoError(t, err) - assert.NotNil(t, osMount) - }) -} - -func TestSoftDeleteCodeModule(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostReconcileData(db) - - codeModule, err := db.ReadCodeModule(CodeModule{Version: "1.2.3"}) - require.NoError(t, err) - - assert.NotNil(t, codeModule) - assert.Equal(t, "someplace", codeModule.Location) - - err = db.DeleteCodeModule(codeModule) - require.NoError(t, err) - - readCodeModule := CodeModule{Version: "1.2.3"} - db.db.WithContext(context.Background()).First(readCodeModule) - assert.Equal(t, int64(0), db.db.RowsAffected) - - err = db.DeleteCodeModule(nil) - require.Error(t, err) 
- - err = db.DeleteCodeModule(&CodeModule{}) - require.Error(t, err) -} - -func TestCreateOsMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostReconcileData(db) - - tenant, err := db.ReadTenantConfig(TenantConfig{TenantUUID: "abc123"}) - require.NoError(t, err) - - vm := VolumeMeta{ - ID: "osmount1", - PodUid: "pod1", - PodName: "podi", - PodNamespace: "testnamespace", - PodServiceAccount: "podsa", - } - - osMount := OSMount{ - VolumeMeta: vm, - Location: "somewhere", - MountAttempts: 1, - TenantUUID: tenant.TenantUUID, - TenantConfig: *tenant, - } - - err = db.CreateOSMount(&osMount) - require.NoError(t, err) - - readOSMount := &OSMount{TenantUUID: "abc123"} - db.db.WithContext(context.Background()).First(readOSMount) - assert.Equal(t, "somewhere", readOSMount.Location) - - err = db.CreateOSMount(nil) - require.Error(t, err) -} - -func TestReadOSMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostPublishData(db) - - osMount, err := db.ReadOSMount(OSMount{TenantUUID: "abc123"}) - require.NoError(t, err) - - assert.NotNil(t, osMount) - assert.Equal(t, "osmount1", osMount.VolumeMeta.ID) - - osMount, err = db.ReadOSMount(OSMount{TenantUUID: ""}) - require.Error(t, err) - assert.Equal(t, "Can't query for empty OSMount", err.Error()) - assert.Nil(t, osMount) - - osMount, err = db.ReadOSMount(OSMount{TenantUUID: "unknown"}) - require.ErrorIs(t, err, gorm.ErrRecordNotFound) - assert.Nil(t, osMount) - - osMount, err = db.ReadOSMount(OSMount{VolumeMetaID: "osmount1"}) - require.NoError(t, err) - assert.NotNil(t, osMount) -} - -func TestUpdateOsMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostPublishData(db) - - osMount, err := db.ReadOSMount(OSMount{TenantUUID: "abc123"}) - require.NoError(t, err) - - osMount.MountAttempts = 5 - - err = db.UpdateOSMount(osMount) - require.NoError(t, err) - - readOSMount := &OSMount{TenantUUID: "abc123"} - db.db.WithContext(context.Background()).First(readOSMount) - assert.Equal(t, int64(5), readOSMount.MountAttempts) - - err = db.UpdateOSMount(nil) - require.Error(t, err) - - err = db.UpdateOSMount(&OSMount{}) - require.Error(t, err) -} - -func TestCreateAppMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostReconcileData(db) - - tenantConfig, err := db.ReadTenantConfig(TenantConfig{TenantUUID: "abc123"}) - require.NoError(t, err) - - cm, err := db.ReadCodeModule(CodeModule{Version: tenantConfig.DownloadedCodeModuleVersion}) - require.NoError(t, err) - - vm := VolumeMeta{ - ID: "appmount1", - PodUid: "pod111", - PodName: "podiv", - PodNamespace: "testnamespace", - PodServiceAccount: "podsa", - } - appMount := &AppMount{ - VolumeMeta: vm, - Location: "loc1", - MountAttempts: 1, - CodeModule: *cm, - } - - err = db.CreateAppMount(appMount) - require.NoError(t, err) - - readAppMount := &AppMount{VolumeMetaID: "appmount1"} - db.db.WithContext(context.Background()).First(readAppMount) - assert.Equal(t, "loc1", readAppMount.Location) - - err = db.CreateAppMount(nil) - require.Error(t, err) - - err = db.CreateAppMount(&AppMount{}) - require.Error(t, err) - - err = db.CreateAppMount(&AppMount{ - VolumeMeta: vm, - }) - require.Error(t, err) - - err = db.CreateAppMount(&AppMount{ - VolumeMeta: vm, - CodeModule: *cm, - }) - require.Error(t, err) - - err = db.CreateAppMount(&AppMount{ - VolumeMeta: vm, - CodeModule: *cm, - Location: "somewhere", - }) - require.Error(t, err) -} - -func TestReadAppMounts(t *testing.T) { - db, err := setupDB() - 
require.NoError(t, err) - - setupPostPublishData(db) - - appMounts, err := db.ReadAppMounts() - require.NoError(t, err) - - assert.NotNil(t, appMounts) - assert.NotEmpty(t, len(appMounts)) - assert.Equal(t, "appmount1", appMounts[0].VolumeMeta.ID) -} - -func TestReadAppMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostPublishData(db) - - appMount, err := db.ReadAppMount(AppMount{VolumeMeta: VolumeMeta{ID: "appmount1"}}) - require.NoError(t, err) - - assert.NotNil(t, appMount) - assert.Equal(t, "appmount1", appMount.VolumeMeta.ID) - - _, err = db.ReadAppMount(AppMount{VolumeMeta: VolumeMeta{ID: ""}}) - require.Error(t, err) - - _, err = db.ReadAppMount(AppMount{VolumeMetaID: "unknown", VolumeMeta: VolumeMeta{ID: "unknown"}}) - require.Error(t, err) -} - -func TestUpdateAppMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostPublishData(db) - - appMount, err := db.ReadAppMount(AppMount{VolumeMeta: VolumeMeta{ID: "appmount1"}}) - require.NoError(t, err) - - appMount.MountAttempts = 5 - - err = db.UpdateAppMount(appMount) - require.NoError(t, err) - - readAppMount := &AppMount{VolumeMetaID: "appmount1"} - db.db.WithContext(context.Background()).First(readAppMount) - assert.Equal(t, int64(5), readAppMount.MountAttempts) - - err = db.UpdateAppMount(nil) - require.Error(t, err) - - err = db.UpdateAppMount(&AppMount{}) - require.Error(t, err) -} - -func TestSoftDeleteAppMount(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostPublishData(db) - - appMount, err := db.ReadAppMount(AppMount{VolumeMeta: VolumeMeta{ID: "appmount1"}}) - require.NoError(t, err) - - assert.NotNil(t, appMount) - assert.Equal(t, "appmount1", appMount.VolumeMeta.ID) - - err = db.DeleteAppMount(appMount) - require.NoError(t, err) - - readAppMount := &AppMount{VolumeMetaID: "appmount1"} - db.db.WithContext(context.Background()).First(readAppMount) - assert.Equal(t, int64(0), db.db.RowsAffected) - - err = db.DeleteAppMount(nil) - require.Error(t, err) - - err = db.DeleteAppMount(&AppMount{}) - require.Error(t, err) -} - -func TestNewAccessOverview(t *testing.T) { - t.Run("storing one of each models", func(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - var tenantConfig *TenantConfig - - // create TenantConfig - db.db.Create(&TenantConfig{ - TenantUUID: "uuid", - }) - - // create AppMount, CodeModule and VolumeMeta - db.db.Create(&AppMount{ - CodeModuleVersion: "1.0", - VolumeMetaID: "1", - CodeModule: CodeModule{Version: "1.0"}, - VolumeMeta: VolumeMeta{ID: "1"}, - }) - - // create OSMount (and reference TenantConfig and VolumeMeta) - db.db.WithContext(context.Background()).Find(&tenantConfig, TenantConfig{TenantUUID: "uuid"}) - db.db.Create(&OSMount{ - VolumeMeta: VolumeMeta{ID: "1"}, - VolumeMetaID: "1", - TenantConfigUID: tenantConfig.UID, - TenantUUID: "uuid", - }) - - got, err := NewAccessOverview(db) - assert.NotNil(t, got) - require.NoError(t, err) - - assert.Len(t, got.AppMounts, 1) - assert.Len(t, got.CodeModules, 1) - assert.Len(t, got.OSMounts, 1) - assert.Len(t, got.TenantConfigs, 1) - assert.Len(t, got.VolumeMetas, 1) - }) -} - -func TestVolumeMetaValidation(t *testing.T) { - db, err := setupDB() - require.NoError(t, err) - - setupPostReconcileData(db) - - vm := &VolumeMeta{ - ID: "appmount1", - PodUid: "pod111", - PodName: "podiv", - PodNamespace: "testnamespace", - PodServiceAccount: "podsa", - } - db.db.Create(vm) - - vm2 := &VolumeMeta{ - ID: "appmount2", - PodName: "podiv", - PodNamespace: 
"testnamespace", - PodServiceAccount: "podsa", - } - db.db.Create(vm2) - - vm3 := &VolumeMeta{ - ID: "appmount3", - PodUid: "pod111", - PodNamespace: "testnamespace", - PodServiceAccount: "podsa", - } - db.db.Create(vm3) - - vm4 := &VolumeMeta{ - ID: "appmount4", - PodUid: "pod111", - PodName: "podiv", - PodServiceAccount: "podsa", - } - db.db.Create(vm4) - - vm5 := &VolumeMeta{ - ID: "appmount5", - PodUid: "pod111", - PodName: "podiv", - PodNamespace: "testnamespace", - } - db.db.Create(vm5) -} - -func setupDB() (*GormConn, error) { - db, err := NewAccess(context.Background(), "file:csi_testdb?mode=memory") - if err != nil { - return nil, err - } - - err = db.InitGormSchema() - - if err != nil { - return nil, err - } - - return db, nil -} - -func setupPostReconcileData(conn *GormConn) { - ctxDB := conn.db.WithContext(conn.ctx) - - tenantConfig := &TenantConfig{ - Name: "abc123", - ConfigDirPath: "somewhere", - DownloadedCodeModuleVersion: "1.2.3", - TenantUUID: "abc123", - } - ctxDB.Create(tenantConfig) - - codeModule := &CodeModule{ - Version: "1.2.3", - Location: "someplace", - } - ctxDB.Create(codeModule) -} - -func setupPostPublishData(conn *GormConn) { - ctxDB := conn.db.WithContext(conn.ctx) - tenantConfig := &TenantConfig{ - Name: "abc123", - ConfigDirPath: "somewhere", - DownloadedCodeModuleVersion: "1.2.3", - TenantUUID: "abc123", - } - ctxDB.Create(tenantConfig) - - codeModule := &CodeModule{ - Version: "1.2.3", - Location: "someplace", - } - ctxDB.Create(codeModule) - - vmOM := VolumeMeta{ - ID: "osmount1", - PodUid: "pod1", - PodName: "podi", - PodNamespace: "testnamespace", - PodServiceAccount: "podsa", - } - osMount := &OSMount{ - VolumeMeta: vmOM, - VolumeMetaID: vmOM.ID, - Location: "somewhere", - TenantUUID: tenantConfig.TenantUUID, - TenantConfig: *tenantConfig, - MountAttempts: 1, - } - ctxDB.Create(osMount) - - for i := range 3 { - vmAP := VolumeMeta{ - ID: fmt.Sprintf("appmount%d", i+1), - PodUid: fmt.Sprintf("pod%d", i+1), - PodName: fmt.Sprintf("podName%d", i+1), - PodNamespace: "testnamespace", - PodServiceAccount: "podsa", - } - appMount := &AppMount{ - VolumeMeta: vmAP, - Location: fmt.Sprintf("loc%d", i+1), - MountAttempts: 1, - CodeModule: *codeModule, - } - ctxDB.Create(appMount) - } -} diff --git a/pkg/controllers/csi/metadata/sqlite_test.go b/pkg/controllers/csi/metadata/sqlite_test.go new file mode 100644 index 0000000000..19522aee3f --- /dev/null +++ b/pkg/controllers/csi/metadata/sqlite_test.go @@ -0,0 +1,645 @@ +package metadata + +import ( + "context" + "fmt" + "strconv" + "testing" + "time" + + dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta1/dynakube" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewAccess(t *testing.T) { + db, err := NewAccess(context.TODO(), ":memory:") + require.NoError(t, err) + assert.NotNil(t, db.(*SqliteAccess).conn) +} + +func TestSetup(t *testing.T) { + db := SqliteAccess{} + err := db.Setup(context.TODO(), ":memory:") + + require.NoError(t, err) + assert.True(t, checkIfTablesExist(&db)) +} + +func TestSetup_badPath(t *testing.T) { + db := SqliteAccess{} + err := db.Setup(context.TODO(), "/asd") + require.Error(t, err) + + assert.False(t, checkIfTablesExist(&db)) +} + +func TestConnect(t *testing.T) { + path := ":memory:" + db := SqliteAccess{} + err := db.connect(sqliteDriverName, path) + require.NoError(t, err) + assert.NotNil(t, db.conn) +} + +func TestConnect_badDriver(t *testing.T) { + db := SqliteAccess{} + err := db.connect("die", "") + 
require.Error(t, err) + assert.Nil(t, db.conn) +} + +func TestCreateTables(t *testing.T) { + ctx := context.TODO() + + t.Run("volume table is created correctly", func(t *testing.T) { + db := emptyMemoryDB() + + err := db.createTables(ctx) + require.NoError(t, err) + + var volumeTableName string + + row := db.conn.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name=?;", volumesTableName) + err = row.Scan(&volumeTableName) + require.NoError(t, err) + assert.Equal(t, volumesTableName, volumeTableName) + + rows, err := db.conn.Query("PRAGMA table_info(" + volumesTableName + ")") + require.NoError(t, err) + assert.NotNil(t, rows) + + columns := []string{ + "ID", + "PodName", + "Version", + "TenantUUID", + "MountAttempts", + } + + for _, column := range columns { + assert.True(t, rows.Next()) + + var id, name, columnType, notNull, primaryKey string + + var defaultValue = new(string) + + err = rows.Scan(&id, &name, &columnType, ¬Null, &defaultValue, &primaryKey) + + require.NoError(t, err) + assert.Equal(t, column, name) + + if column == "MountAttempts" { + assert.Equal(t, "0", *defaultValue) + assert.Equal(t, "1", notNull) + } + } + }) + t.Run("dynakube table is created correctly", func(t *testing.T) { + db := emptyMemoryDB() + + err := db.createTables(ctx) + require.NoError(t, err) + + var dkTable string + + row := db.conn.QueryRow("SELECT name FROM sqlite_master WHERE type='table' AND name=?;", dynakubesTableName) + err = row.Scan(&dkTable) + require.NoError(t, err) + assert.Equal(t, dynakubesTableName, dkTable) + + rows, err := db.conn.Query("PRAGMA table_info(" + dynakubesTableName + ")") + require.NoError(t, err) + assert.NotNil(t, rows) + + columns := []string{ + "Name", + "TenantUUID", + "LatestVersion", + "ImageDigest", + "MaxFailedMountAttempts", + } + + for _, column := range columns { + assert.True(t, rows.Next()) + + var id, name, columnType, notNull, primaryKey string + + var defaultValue = new(string) + + err = rows.Scan(&id, &name, &columnType, ¬Null, &defaultValue, &primaryKey) + + require.NoError(t, err) + assert.Equal(t, column, name) + + if column == "MaxFailedMountAttempts" { + maxFailedMountAttempts, err := strconv.Atoi(*defaultValue) + require.NoError(t, err) + assert.Equal(t, strconv.Itoa(dynatracev1beta2.DefaultMaxFailedCsiMountAttempts), *defaultValue) + assert.Equal(t, dynatracev1beta2.DefaultMaxFailedCsiMountAttempts, maxFailedMountAttempts) + assert.Equal(t, "1", notNull) + } + } + }) +} + +func TestInsertDynakube(t *testing.T) { + testDynakube1 := createTestDynakube(1) + + db := FakeMemoryDB() + + err := db.InsertDynakube(context.TODO(), &testDynakube1) + require.NoError(t, err) + + var uuid, lv, name string + + var imageDigest string + + var maxMountAttempts int + + row := db.conn.QueryRow(fmt.Sprintf("SELECT * FROM %s WHERE TenantUUID = ?;", dynakubesTableName), testDynakube1.TenantUUID) + err = row.Scan(&name, &uuid, &lv, &imageDigest, &maxMountAttempts) + require.NoError(t, err) + assert.Equal(t, testDynakube1.TenantUUID, uuid) + assert.Equal(t, testDynakube1.LatestVersion, lv) + assert.Equal(t, testDynakube1.Name, name) + assert.Equal(t, testDynakube1.ImageDigest, imageDigest) + assert.Equal(t, testDynakube1.MaxFailedMountAttempts, maxMountAttempts) +} + +func TestGetDynakube_Empty(t *testing.T) { + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + + gt, err := db.GetDynakube(context.TODO(), testDynakube1.TenantUUID) + require.NoError(t, err) + assert.Nil(t, gt) +} + +func TestGetDynakube(t *testing.T) { + ctx := context.TODO() + + 
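+ // dynakube metadata entries are looked up by the DynaKube name, not by the tenant UUID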
t.Run("get dynakube", func(t *testing.T) { + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + err := db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + + dynakube, err := db.GetDynakube(ctx, testDynakube1.Name) + require.NoError(t, err) + assert.Equal(t, testDynakube1, *dynakube) + }) +} + +func TestUpdateDynakube(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + err := db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + + copyDynakube := testDynakube1 + copyDynakube.LatestVersion = "132.546" + copyDynakube.ImageDigest = "" + copyDynakube.MaxFailedMountAttempts = 10 + err = db.UpdateDynakube(ctx, ©Dynakube) + require.NoError(t, err) + + var uuid, lv, name string + + var imageDigest string + + var maxFailedMountAttempts int + + row := db.conn.QueryRow(fmt.Sprintf("SELECT Name, TenantUUID, LatestVersion, ImageDigest, MaxFailedMountAttempts FROM %s WHERE Name = ?;", dynakubesTableName), copyDynakube.Name) + err = row.Scan(&name, &uuid, &lv, &imageDigest, &maxFailedMountAttempts) + + require.NoError(t, err) + assert.Equal(t, copyDynakube.TenantUUID, uuid) + assert.Equal(t, copyDynakube.LatestVersion, lv) + assert.Equal(t, copyDynakube.Name, name) + assert.Equal(t, copyDynakube.MaxFailedMountAttempts, maxFailedMountAttempts) + assert.Empty(t, imageDigest) +} + +func TestGetTenantsToDynakubes(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + testDynakube2 := createTestDynakube(2) + + db := FakeMemoryDB() + err := db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + err = db.InsertDynakube(ctx, &testDynakube2) + require.NoError(t, err) + + dynakubes, err := db.GetTenantsToDynakubes(ctx) + require.NoError(t, err) + assert.Len(t, dynakubes, 2) + assert.Equal(t, testDynakube1.TenantUUID, dynakubes[testDynakube1.Name]) + assert.Equal(t, testDynakube2.TenantUUID, dynakubes[testDynakube2.Name]) +} + +func TestGetAllDynakubes(t *testing.T) { + ctx := context.TODO() + + t.Run("get multiple dynakubes", func(t *testing.T) { + testDynakube1 := createTestDynakube(1) + testDynakube2 := createTestDynakube(2) + + db := FakeMemoryDB() + err := db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + err = db.InsertDynakube(ctx, &testDynakube2) + require.NoError(t, err) + + dynakubes, err := db.GetAllDynakubes(ctx) + require.NoError(t, err) + assert.Len(t, dynakubes, 2) + }) +} + +func TestGetAllVolumes(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + testVolume2 := createTestVolume(2) + + db := FakeMemoryDB() + err := db.InsertVolume(ctx, &testVolume1) + require.NoError(t, err) + err = db.InsertVolume(ctx, &testVolume2) + require.NoError(t, err) + + volumes, err := db.GetAllVolumes(ctx) + require.NoError(t, err) + assert.Len(t, volumes, 2) + assert.Equal(t, testVolume1, *volumes[0]) + assert.Equal(t, testVolume2, *volumes[1]) +} + +func TestGetAllOsAgentVolumes(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + testDynakube2 := createTestDynakube(2) + + now := time.Now() + osVolume1 := OsAgentVolume{ + VolumeID: "vol-1", + TenantUUID: testDynakube1.TenantUUID, + Mounted: true, + LastModified: &now, + } + osVolume2 := OsAgentVolume{ + VolumeID: "vol-2", + TenantUUID: testDynakube2.TenantUUID, + Mounted: true, + LastModified: &now, + } + db := FakeMemoryDB() + err := db.InsertOsAgentVolume(ctx, &osVolume1) + require.NoError(t, err) + err = db.InsertOsAgentVolume(ctx, &osVolume2) + 
require.NoError(t, err) + + osVolumes, err := db.GetAllOsAgentVolumes(ctx) + require.NoError(t, err) + assert.Len(t, osVolumes, 2) +} + +func TestDeleteDynakube(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + testDynakube2 := createTestDynakube(2) + + db := FakeMemoryDB() + err := db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + err = db.InsertDynakube(ctx, &testDynakube2) + require.NoError(t, err) + + err = db.DeleteDynakube(ctx, testDynakube1.Name) + require.NoError(t, err) + dynakubes, err := db.GetTenantsToDynakubes(ctx) + require.NoError(t, err) + assert.Len(t, dynakubes, 1) + assert.Equal(t, testDynakube2.TenantUUID, dynakubes[testDynakube2.Name]) +} + +func TestGetVolume_Empty(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + db := FakeMemoryDB() + + vo, err := db.GetVolume(ctx, testVolume1.PodName) + require.NoError(t, err) + assert.Nil(t, vo) +} + +func TestInsertVolume(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + db := FakeMemoryDB() + + err := db.InsertVolume(ctx, &testVolume1) + require.NoError(t, err) + + row := db.conn.QueryRow(fmt.Sprintf("SELECT * FROM %s WHERE ID = ?;", volumesTableName), testVolume1.VolumeID) + + var id string + + var puid string + + var ver string + + var tuid string + + var mountAttempts int + err = row.Scan(&id, &puid, &ver, &tuid, &mountAttempts) + + require.NoError(t, err) + assert.Equal(t, testVolume1.VolumeID, id) + assert.Equal(t, testVolume1.PodName, puid) + assert.Equal(t, testVolume1.Version, ver) + assert.Equal(t, testVolume1.TenantUUID, tuid) + assert.Equal(t, testVolume1.MountAttempts, mountAttempts) + + newPodName := "something-else" + testVolume1.PodName = newPodName + err = db.InsertVolume(ctx, &testVolume1) + require.NoError(t, err) + + row = db.conn.QueryRow(fmt.Sprintf("SELECT * FROM %s WHERE ID = ?;", volumesTableName), testVolume1.VolumeID) + err = row.Scan(&id, &puid, &ver, &tuid, &mountAttempts) + + require.NoError(t, err) + assert.Equal(t, testVolume1.VolumeID, id) + assert.Equal(t, testVolume1.PodName, puid) + assert.Equal(t, testVolume1.Version, ver) + assert.Equal(t, testVolume1.TenantUUID, tuid) + assert.Equal(t, testVolume1.MountAttempts, mountAttempts) +} + +func TestInsertOsAgentVolume(t *testing.T) { + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + + now := time.Now() + volume := OsAgentVolume{ + VolumeID: "vol-4", + TenantUUID: testDynakube1.TenantUUID, + Mounted: true, + LastModified: &now, + } + + err := db.InsertOsAgentVolume(context.TODO(), &volume) + require.NoError(t, err) + + row := db.conn.QueryRow(fmt.Sprintf("SELECT * FROM %s WHERE TenantUUID = ?;", osAgentVolumesTableName), volume.TenantUUID) + + var volumeID string + + var tenantUUID string + + var mounted bool + + var lastModified time.Time + err = row.Scan(&tenantUUID, &volumeID, &mounted, &lastModified) + require.NoError(t, err) + assert.Equal(t, volumeID, volume.VolumeID) + assert.Equal(t, tenantUUID, volume.TenantUUID) + assert.Equal(t, mounted, volume.Mounted) + assert.True(t, volume.LastModified.Equal(lastModified)) +} + +func TestGetOsAgentVolumeViaVolumeID(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + + now := time.Now() + expected := OsAgentVolume{ + VolumeID: "vol-4", + TenantUUID: testDynakube1.TenantUUID, + Mounted: true, + LastModified: &now, + } + + err := db.InsertOsAgentVolume(ctx, &expected) + require.NoError(t, err) + actual, err := 
db.GetOsAgentVolumeViaVolumeID(ctx, expected.VolumeID) + require.NoError(t, err) + assert.Equal(t, expected.VolumeID, actual.VolumeID) + assert.Equal(t, expected.TenantUUID, actual.TenantUUID) + assert.Equal(t, expected.Mounted, actual.Mounted) + assert.True(t, expected.LastModified.Equal(*actual.LastModified)) +} + +func TestGetOsAgentVolumeViaTenantUUID(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + + now := time.Now() + expected := OsAgentVolume{ + VolumeID: "vol-4", + TenantUUID: testDynakube1.TenantUUID, + Mounted: true, + LastModified: &now, + } + + err := db.InsertOsAgentVolume(ctx, &expected) + require.NoError(t, err) + actual, err := db.GetOsAgentVolumeViaTenantUUID(ctx, expected.TenantUUID) + require.NoError(t, err) + assert.Equal(t, expected.VolumeID, actual.VolumeID) + assert.Equal(t, expected.TenantUUID, actual.TenantUUID) + assert.Equal(t, expected.Mounted, actual.Mounted) + assert.True(t, expected.LastModified.Equal(*actual.LastModified)) +} + +func TestUpdateOsAgentVolume(t *testing.T) { + ctx := context.TODO() + testDynakube1 := createTestDynakube(1) + db := FakeMemoryDB() + + now := time.Now() + + oldEntry := OsAgentVolume{ + VolumeID: "vol-4", + TenantUUID: testDynakube1.TenantUUID, + Mounted: true, + LastModified: &now, + } + + err := db.InsertOsAgentVolume(ctx, &oldEntry) + require.NoError(t, err) + + newEntry := oldEntry + newEntry.Mounted = false + err = db.UpdateOsAgentVolume(ctx, &newEntry) + require.NoError(t, err) + + actual, err := db.GetOsAgentVolumeViaVolumeID(ctx, oldEntry.VolumeID) + require.NoError(t, err) + assert.Equal(t, oldEntry.VolumeID, actual.VolumeID) + assert.Equal(t, oldEntry.TenantUUID, actual.TenantUUID) + assert.NotEqual(t, oldEntry.Mounted, actual.Mounted) + assert.True(t, oldEntry.LastModified.Equal(*actual.LastModified)) +} + +func TestGetVolume(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + db := FakeMemoryDB() + err := db.InsertVolume(ctx, &testVolume1) + require.NoError(t, err) + + volume, err := db.GetVolume(ctx, testVolume1.VolumeID) + require.NoError(t, err) + assert.Equal(t, testVolume1, *volume) +} + +func TestUpdateVolume(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + db := FakeMemoryDB() + err := db.InsertVolume(ctx, &testVolume1) + + require.NoError(t, err) + + testVolume1.PodName = "different pod name" + testVolume1.Version = "new version" + testVolume1.TenantUUID = "asdf-1234" + testVolume1.MountAttempts = 10 + err = db.InsertVolume(ctx, &testVolume1) + + require.NoError(t, err) + + insertedVolume, err := db.GetVolume(ctx, testVolume1.VolumeID) + + require.NoError(t, err) + assert.Equal(t, testVolume1.VolumeID, insertedVolume.VolumeID) + assert.Equal(t, testVolume1.PodName, insertedVolume.PodName) + assert.Equal(t, testVolume1.Version, insertedVolume.Version) + assert.Equal(t, testVolume1.TenantUUID, insertedVolume.TenantUUID) + assert.Equal(t, testVolume1.MountAttempts, insertedVolume.MountAttempts) +} + +func TestGetUsedVersions(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + db := FakeMemoryDB() + err := db.InsertVolume(ctx, &testVolume1) + testVolume11 := testVolume1 + testVolume11.VolumeID = "vol-11" + testVolume11.Version = "321" + + require.NoError(t, err) + err = db.InsertVolume(ctx, &testVolume11) + require.NoError(t, err) + + versions, err := db.GetUsedVersions(ctx, testVolume1.TenantUUID) + require.NoError(t, err) + assert.Len(t, versions,
2) + assert.True(t, versions[testVolume1.Version]) + assert.True(t, versions[testVolume11.Version]) +} + +func TestGetAllUsedVersions(t *testing.T) { + ctx := context.TODO() + db := FakeMemoryDB() + testVolume1 := createTestVolume(1) + err := db.InsertVolume(ctx, &testVolume1) + testVolume11 := testVolume1 + testVolume11.VolumeID = "vol-11" + testVolume11.Version = "321" + + require.NoError(t, err) + err = db.InsertVolume(ctx, &testVolume11) + require.NoError(t, err) + + versions, err := db.GetAllUsedVersions(ctx) + require.NoError(t, err) + assert.Len(t, versions, 2) + assert.True(t, versions[testVolume1.Version]) + assert.True(t, versions[testVolume11.Version]) +} + +func TestGetUsedImageDigests(t *testing.T) { + ctx := context.TODO() + db := FakeMemoryDB() + testDynakube1 := createTestDynakube(1) + err := db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + + copyDynakube := testDynakube1 + copyDynakube.Name = "copy" + err = db.InsertDynakube(ctx, ©Dynakube) + require.NoError(t, err) + + testDynakube2 := createTestDynakube(2) + err = db.InsertDynakube(ctx, &testDynakube2) + require.NoError(t, err) + + digests, err := db.GetUsedImageDigests(ctx) + require.NoError(t, err) + assert.Len(t, digests, 2) + assert.True(t, digests[testDynakube1.ImageDigest]) + assert.True(t, digests[copyDynakube.ImageDigest]) + assert.True(t, digests[testDynakube2.ImageDigest]) +} + +func TestIsImageDigestUsed(t *testing.T) { + ctx := context.TODO() + db := FakeMemoryDB() + + isUsed, err := db.IsImageDigestUsed(ctx, "test") + require.NoError(t, err) + require.False(t, isUsed) + + testDynakube1 := createTestDynakube(1) + err = db.InsertDynakube(ctx, &testDynakube1) + require.NoError(t, err) + + isUsed, err = db.IsImageDigestUsed(ctx, testDynakube1.ImageDigest) + require.NoError(t, err) + require.True(t, isUsed) +} + +func TestGetPodNames(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + testVolume2 := createTestVolume(2) + + db := FakeMemoryDB() + err := db.InsertVolume(ctx, &testVolume1) + require.NoError(t, err) + err = db.InsertVolume(ctx, &testVolume2) + require.NoError(t, err) + + podNames, err := db.GetPodNames(ctx) + require.NoError(t, err) + assert.Len(t, podNames, 2) + assert.Equal(t, testVolume1.VolumeID, podNames[testVolume1.PodName]) + assert.Equal(t, testVolume2.VolumeID, podNames[testVolume2.PodName]) +} + +func TestDeleteVolume(t *testing.T) { + ctx := context.TODO() + testVolume1 := createTestVolume(1) + testVolume2 := createTestVolume(2) + + db := FakeMemoryDB() + err := db.InsertVolume(ctx, &testVolume1) + require.NoError(t, err) + err = db.InsertVolume(ctx, &testVolume2) + require.NoError(t, err) + + err = db.DeleteVolume(ctx, testVolume2.VolumeID) + require.NoError(t, err) + podNames, err := db.GetPodNames(ctx) + require.NoError(t, err) + assert.Len(t, podNames, 1) + assert.Equal(t, testVolume1.VolumeID, podNames[testVolume1.PodName]) +} diff --git a/pkg/controllers/csi/provisioner/controller.go b/pkg/controllers/csi/provisioner/controller.go index 85290e8623..fbd8eb3ad9 100644 --- a/pkg/controllers/csi/provisioner/controller.go +++ b/pkg/controllers/csi/provisioner/controller.go @@ -19,6 +19,7 @@ package csiprovisioner import ( "context" "fmt" + "time" dynatracev1beta2 "github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" dtclient "github.com/Dynatrace/dynatrace-operator/pkg/clients/dynatrace" @@ -32,11 +33,9 @@ import ( "github.com/Dynatrace/dynatrace-operator/pkg/injection/codemodule/installer" 
"github.com/Dynatrace/dynatrace-operator/pkg/injection/codemodule/installer/image" "github.com/Dynatrace/dynatrace-operator/pkg/injection/codemodule/installer/url" - "github.com/Dynatrace/dynatrace-operator/pkg/oci/registry" "github.com/Dynatrace/dynatrace-operator/pkg/util/dtotel" "github.com/pkg/errors" "github.com/spf13/afero" - "gorm.io/gorm" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -46,8 +45,14 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) +const ( + shortRequeueDuration = 1 * time.Minute + defaultRequeueDuration = 5 * time.Minute + longRequeueDuration = 30 * time.Minute +) + type urlInstallerBuilder func(afero.Fs, dtclient.Client, *url.Properties) installer.Installer -type imageInstallerBuilder func(context.Context, afero.Fs, *image.Properties) (installer.Installer, error) +type imageInstallerBuilder func(afero.Fs, *image.Properties) (installer.Installer, error) // OneAgentProvisioner reconciles a DynaKube object type OneAgentProvisioner struct { @@ -61,13 +66,12 @@ type OneAgentProvisioner struct { dynatraceClientBuilder dynatraceclient.Builder urlInstallerBuilder urlInstallerBuilder imageInstallerBuilder imageInstallerBuilder - registryClientBuilder registry.ClientBuilder opts dtcsi.CSIOptions path metadata.PathResolver } // NewOneAgentProvisioner returns a new OneAgentProvisioner -func NewOneAgentProvisioner(mgr manager.Manager, opts dtcsi.CSIOptions, db metadata.AccessCleaner) *OneAgentProvisioner { +func NewOneAgentProvisioner(mgr manager.Manager, opts dtcsi.CSIOptions, db metadata.Access) *OneAgentProvisioner { return &OneAgentProvisioner{ client: mgr.GetClient(), apiReader: mgr.GetAPIReader(), @@ -80,7 +84,6 @@ func NewOneAgentProvisioner(mgr manager.Manager, opts dtcsi.CSIOptions, db metad dynatraceClientBuilder: dynatraceclient.NewBuilder(mgr.GetAPIReader()), urlInstallerBuilder: url.NewUrlInstaller, imageInstallerBuilder: image.NewImageInstaller, - registryClientBuilder: registry.NewClient, } } @@ -96,15 +99,21 @@ func (provisioner *OneAgentProvisioner) Reconcile(ctx context.Context, request r ctx, span := dtotel.StartSpan(ctx, csiotel.Tracer(), csiotel.SpanOptions()...) 
defer span.End() - dk, err := provisioner.needsReconcile(ctx, request) + dk, err := provisioner.getDynaKube(ctx, request.NamespacedName) if err != nil { span.RecordError(err) - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, err + if k8serrors.IsNotFound(err) { + return reconcile.Result{}, provisioner.db.DeleteDynakube(ctx, request.Name) + } + + return reconcile.Result{}, err } - if dk == nil { - return provisioner.collectGarbage(ctx, request) + if !dk.NeedsCSIDriver() { + log.Info("CSI driver provisioner not needed") + + return reconcile.Result{RequeueAfter: longRequeueDuration}, provisioner.db.DeleteDynakube(ctx, request.Name) } err = provisioner.setupFileSystem(dk) @@ -112,7 +121,7 @@ func (provisioner *OneAgentProvisioner) Reconcile(ctx context.Context, request r return reconcile.Result{}, err } - tenantConfig, err := provisioner.setupTenantConfig(dk) // needed for the CSI-resilience feature + dynakubeMetadata, err := provisioner.setupDynakubeMetadata(ctx, dk) // needed for the CSI-resilience feature if err != nil { return reconcile.Result{}, err } @@ -120,55 +129,26 @@ func (provisioner *OneAgentProvisioner) Reconcile(ctx context.Context, request r if !dk.NeedAppInjection() { log.Info("app injection not necessary, skip agent codemodule download", "dynakube", dk.Name) - return provisioner.collectGarbage(ctx, request) + return reconcile.Result{RequeueAfter: longRequeueDuration}, nil } if dk.CodeModulesImage() == "" && dk.CodeModulesVersion() == "" { log.Info("dynakube status is not yet ready, requeuing", "dynakube", dk.Name) - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, err + return reconcile.Result{RequeueAfter: shortRequeueDuration}, err } - requeue, err := provisioner.provisionCodeModules(ctx, dk, tenantConfig) + err = provisioner.provisionCodeModules(ctx, dk, dynakubeMetadata) if err != nil { - if requeue { - return reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, err - } - return reconcile.Result{}, err } - return provisioner.collectGarbage(ctx, request) -} - -// needsReconcile checks if the DynaKube in the requests exists or needs any CSI functionality, if not then it runs the GC -func (provisioner *OneAgentProvisioner) needsReconcile(ctx context.Context, request reconcile.Request) (*dynatracev1beta2.DynaKube, error) { - dk, err := provisioner.getDynaKube(ctx, request.NamespacedName) + err = provisioner.collectGarbage(ctx, request) if err != nil { - if k8serrors.IsNotFound(err) { - log.Info("DynaKube was deleted, running cleanup") - - err := provisioner.db.DeleteTenantConfig(&metadata.TenantConfig{Name: request.Name}, true) - if err != nil { - return nil, err - } - } - - return nil, nil //nolint: nilnil - } - - if !dk.NeedsCSIDriver() { - log.Info("CSI driver provisioner not needed") - - err = provisioner.db.DeleteTenantConfig(&metadata.TenantConfig{Name: dk.Name}, true) - if err != nil { - return nil, err - } - - return nil, nil //nolint: nilnil + return reconcile.Result{}, err } - return dk, nil + return reconcile.Result{RequeueAfter: defaultRequeueDuration}, nil } func (provisioner *OneAgentProvisioner) setupFileSystem(dk *dynatracev1beta2.DynaKube) error { @@ -188,75 +168,51 @@ func (provisioner *OneAgentProvisioner) setupFileSystem(dk *dynatracev1beta2.Dyn return nil } -func (provisioner *OneAgentProvisioner) setupTenantConfig(dk *dynatracev1beta2.DynaKube) (*metadata.TenantConfig, error) { - metadataTenantConfig, err := provisioner.handleMetadata(dk) +func (provisioner *OneAgentProvisioner) setupDynakubeMetadata(ctx 
context.Context, dk *dynatracev1beta2.DynaKube) (*metadata.Dynakube, error) { + dynakubeMetadata, oldDynakubeMetadata, err := provisioner.handleMetadata(ctx, dk) if err != nil { return nil, err } - // Create/update the Dynakube's metadata TenantConfig entry while `LatestVersion` is not necessarily set + // Create/update the dynakubeMetadata entry while `LatestVersion` is not necessarily set // so the host oneagent-storages can be mounted before the standalone agent binaries are ready to be mounted - tenantConfig, err := provisioner.db.ReadTenantConfig(metadata.TenantConfig{Name: metadataTenantConfig.Name}) - if err != nil && errors.Is(err, gorm.ErrRecordNotFound) { - err = provisioner.db.CreateTenantConfig(metadataTenantConfig) - if err != nil { - return nil, err - } - - return metadataTenantConfig, nil - } else if err != nil { - return nil, err - } - - metadataTenantConfig.UID = tenantConfig.UID - err = provisioner.db.UpdateTenantConfig(metadataTenantConfig) - - if err != nil { - return nil, err - } - - return metadataTenantConfig, nil + return dynakubeMetadata, provisioner.createOrUpdateDynakubeMetadata(ctx, oldDynakubeMetadata, dynakubeMetadata) } -func (provisioner *OneAgentProvisioner) collectGarbage(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { +func (provisioner *OneAgentProvisioner) collectGarbage(ctx context.Context, request reconcile.Request) error { ctx, span := dtotel.StartSpan(ctx, csiotel.Tracer(), csiotel.SpanOptions()...) defer span.End() - result, err := provisioner.gc.Reconcile(ctx, request) + _, err := provisioner.gc.Reconcile(ctx, request) - if err != nil { - span.RecordError(err) - - return result, err - } - - return result, nil + return err } -func (provisioner *OneAgentProvisioner) provisionCodeModules(ctx context.Context, dk *dynatracev1beta2.DynaKube, tenantConfig *metadata.TenantConfig) (requeue bool, err error) { +func (provisioner *OneAgentProvisioner) provisionCodeModules(ctx context.Context, dk *dynatracev1beta2.DynaKube, dynakubeMetadata *metadata.Dynakube) error { + oldDynakubeMetadata := *dynakubeMetadata // creates a dt client and checks tokens exist for the given dynakube dtc, err := buildDtc(provisioner, ctx, dk) if err != nil { - return true, err + return err } - requeue, err = provisioner.updateAgentInstallation(ctx, dtc, tenantConfig, dk) - if err != nil { - return requeue, err + requeue, err := provisioner.updateAgentInstallation(ctx, dtc, dynakubeMetadata, dk) + if requeue || err != nil { + return err } // Set/Update the `LatestVersion` field in the database entry - err = provisioner.db.UpdateTenantConfig(tenantConfig) + err = provisioner.createOrUpdateDynakubeMetadata(ctx, oldDynakubeMetadata, dynakubeMetadata) if err != nil { - return true, err + return err } - return false, nil + return nil } func (provisioner *OneAgentProvisioner) updateAgentInstallation( ctx context.Context, dtc dtclient.Client, - tenantConfig *metadata.TenantConfig, + dynakubeMetadata *metadata.Dynakube, dk *dynatracev1beta2.DynaKube, ) ( requeue bool, @@ -273,46 +229,79 @@ func (provisioner *OneAgentProvisioner) updateAgentInstallation( } if dk.CodeModulesImage() != "" { - updatedImageURI, err := provisioner.installAgentImage(ctx, *dk, latestProcessModuleConfig) + updatedDigest, err := provisioner.installAgentImage(ctx, *dk, latestProcessModuleConfig) if err != nil { log.Info("error when updating agent from image", "error", err.Error()) // reporting error but not returning it to avoid immediate requeue and subsequently calling the API every few 
seconds return true, nil + } else if updatedDigest != "" { + dynakubeMetadata.LatestVersion = "" + dynakubeMetadata.ImageDigest = updatedDigest } - - tenantConfig.DownloadedCodeModuleVersion = updatedImageURI } else { updateVersion, err := provisioner.installAgentZip(ctx, *dk, dtc, latestProcessModuleConfig) if err != nil { log.Info("error when updating agent from zip", "error", err.Error()) // reporting error but not returning it to avoid immediate requeue and subsequently calling the API every few seconds return true, nil - } - - if updateVersion != "" { - tenantConfig.DownloadedCodeModuleVersion = updateVersion + } else if updateVersion != "" { + dynakubeMetadata.LatestVersion = updateVersion + dynakubeMetadata.ImageDigest = "" } } return false, nil } -func (provisioner *OneAgentProvisioner) handleMetadata(dk *dynatracev1beta2.DynaKube) (*metadata.TenantConfig, error) { - tenantUUID, err := dk.TenantUUIDFromApiUrl() // TODO update to use the tenant uuid from the DynaKube status +func (provisioner *OneAgentProvisioner) handleMetadata(ctx context.Context, dk *dynatracev1beta2.DynaKube) (*metadata.Dynakube, metadata.Dynakube, error) { + dynakubeMetadata, err := provisioner.db.GetDynakube(ctx, dk.Name) if err != nil { - return nil, err + return nil, metadata.Dynakube{}, errors.WithStack(err) } - newTenantConfig := &metadata.TenantConfig{ - UID: string(dk.UID), - Name: dk.Name, - TenantUUID: tenantUUID, - DownloadedCodeModuleVersion: dk.CodeModulesVersion(), - MaxFailedMountAttempts: int64(dk.FeatureMaxFailedCsiMountAttempts()), - ConfigDirPath: provisioner.path.AgentConfigDir(tenantUUID, dk.Name), + // In case of a new dynakubeMetadata + var oldDynakubeMetadata metadata.Dynakube + if dynakubeMetadata != nil { + oldDynakubeMetadata = *dynakubeMetadata } - return newTenantConfig, nil + tenantUUID, err := dk.TenantUUIDFromApiUrl() + if err != nil { + return nil, metadata.Dynakube{}, err + } + + dynakubeMetadata = metadata.NewDynakube( + dk.Name, + tenantUUID, + oldDynakubeMetadata.LatestVersion, + oldDynakubeMetadata.ImageDigest, + dk.FeatureMaxFailedCsiMountAttempts()) + + return dynakubeMetadata, oldDynakubeMetadata, nil +} + +func (provisioner *OneAgentProvisioner) createOrUpdateDynakubeMetadata(ctx context.Context, oldDynakube metadata.Dynakube, dynakube *metadata.Dynakube) error { + if oldDynakube != *dynakube { + log.Info("dynakube has changed", + "name", dynakube.Name, + "tenantUUID", dynakube.TenantUUID, + "version", dynakube.LatestVersion, + "max mount attempts", dynakube.MaxFailedMountAttempts) + + if oldDynakube == (metadata.Dynakube{}) { + log.Info("adding dynakube to db", "tenantUUID", dynakube.TenantUUID, "version", dynakube.LatestVersion) + + return provisioner.db.InsertDynakube(ctx, dynakube) + } else { + log.Info("updating dynakube in db", + "old version", oldDynakube.LatestVersion, "new version", dynakube.LatestVersion, + "old tenantUUID", oldDynakube.TenantUUID, "new tenantUUID", dynakube.TenantUUID) + + return provisioner.db.UpdateDynakube(ctx, dynakube) + } + } + + return nil } func buildDtc(provisioner *OneAgentProvisioner, ctx context.Context, dk *dynatracev1beta2.DynaKube) (dtclient.Client, error) { diff --git a/pkg/controllers/csi/provisioner/controller_test.go b/pkg/controllers/csi/provisioner/controller_test.go index 8253d14472..501b55337d 100644 --- a/pkg/controllers/csi/provisioner/controller_test.go +++ b/pkg/controllers/csi/provisioner/controller_test.go @@ -13,7 +13,6 @@ import ( "github.com/Dynatrace/dynatrace-operator/pkg/api/status" dynatracev1beta2 
"github.com/Dynatrace/dynatrace-operator/pkg/api/v1beta2/dynakube" dtclient "github.com/Dynatrace/dynatrace-operator/pkg/clients/dynatrace" - dtcsi "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/csi/metadata" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/dynakube/connectioninfo" "github.com/Dynatrace/dynatrace-operator/pkg/controllers/dynakube/processmoduleconfigsecret" @@ -60,53 +59,63 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) { ctx := context.Background() dynakubeName := "test-dk" - t.Run("no dynakube instance -> still try to GC according to database", func(t *testing.T) { + t.Run("no dynakube instance", func(t *testing.T) { gc := reconcilermock.NewReconciler(t) - gc.Mock.On("Reconcile", mock.Anything, mock.Anything).Return(reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, nil) provisioner := &OneAgentProvisioner{ apiReader: fake.NewClient(), db: metadata.FakeMemoryDB(), gc: gc, } - result, err := provisioner.Reconcile(ctx, reconcile.Request{}) + result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{}) require.NoError(t, err) require.NotNil(t, result) - require.Equal(t, reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, result) + require.Equal(t, reconcile.Result{}, result) }) t.Run("dynakube deleted", func(t *testing.T) { gc := reconcilermock.NewReconciler(t) - gc.Mock.On("Reconcile", mock.Anything, mock.Anything).Return(reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, nil) - db := metadata.FakeMemoryDB() - - tenantConfig := metadata.TenantConfig{ - TenantUUID: tenantUUID, - Name: dkName, - DownloadedCodeModuleVersion: agentVersion, - } - - err := db.CreateTenantConfig(&tenantConfig) - require.NoError(t, err) - + dynakube := metadata.Dynakube{TenantUUID: tenantUUID, LatestVersion: agentVersion, Name: dkName} + _ = db.InsertDynakube(ctx, &dynakube) provisioner := &OneAgentProvisioner{ apiReader: fake.NewClient(), db: db, gc: gc, } - result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: tenantConfig.Name}}) + result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakube.Name}}) require.NoError(t, err) require.NotNil(t, result) - require.Equal(t, reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, result) + require.Equal(t, reconcile.Result{}, result) - ten, err := db.ReadTenantConfig(metadata.TenantConfig{TenantUUID: tenantConfig.TenantUUID}) - require.Error(t, err) + ten, err := db.GetDynakube(ctx, dynakube.TenantUUID) + require.NoError(t, err) require.Nil(t, ten) }) - t.Run("no csi needed", func(t *testing.T) { + t.Run("application monitoring disabled", func(t *testing.T) { + gc := reconcilermock.NewReconciler(t) + provisioner := &OneAgentProvisioner{ + apiReader: fake.NewClient( + &dynatracev1beta2.DynaKube{ + ObjectMeta: metav1.ObjectMeta{ + Name: dynakubeName, + }, + Spec: dynatracev1beta2.DynaKubeSpec{ + OneAgent: dynatracev1beta2.OneAgentSpec{}, + }, + }, + ), + db: metadata.FakeMemoryDB(), + gc: gc, + } + result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakubeName}}) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, reconcile.Result{RequeueAfter: longRequeueDuration}, result) + }) + t.Run("csi driver not enabled", func(t *testing.T) { gc := reconcilermock.NewReconciler(t) - gc.Mock.On("Reconcile", mock.Anything, 
mock.Anything).Return(reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, nil) provisioner := &OneAgentProvisioner{ apiReader: fake.NewClient( &dynatracev1beta2.DynaKube{ @@ -115,7 +124,9 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) { }, Spec: dynatracev1beta2.DynaKubeSpec{ OneAgent: dynatracev1beta2.OneAgentSpec{ - ClassicFullStack: &dynatracev1beta2.HostInjectSpec{}, + ApplicationMonitoring: &dynatracev1beta2.ApplicationMonitoringSpec{ + AppInjectionSpec: dynatracev1beta2.AppInjectionSpec{}, + }, }, }, }, @@ -123,13 +134,45 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) { db: metadata.FakeMemoryDB(), gc: gc, } - result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakubeName}}) + result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakubeName}}) require.NoError(t, err) require.NotNil(t, result) - require.Equal(t, reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, result) + require.Equal(t, reconcile.Result{RequeueAfter: longRequeueDuration}, result) }) - t.Run("host monitoring used -> no app inject is needed", func(t *testing.T) { + t.Run("csi driver disabled", func(t *testing.T) { + gc := reconcilermock.NewReconciler(t) + db := metadata.FakeMemoryDB() + _ = db.InsertDynakube(ctx, &metadata.Dynakube{Name: dynakubeName}) + provisioner := &OneAgentProvisioner{ + apiReader: fake.NewClient( + &dynatracev1beta2.DynaKube{ + ObjectMeta: metav1.ObjectMeta{ + Name: dynakubeName, + }, + Spec: dynatracev1beta2.DynaKubeSpec{ + OneAgent: dynatracev1beta2.OneAgentSpec{ + ApplicationMonitoring: &dynatracev1beta2.ApplicationMonitoringSpec{ + AppInjectionSpec: dynatracev1beta2.AppInjectionSpec{}, + }, + }, + }, + }, + ), + db: db, + gc: gc, + } + result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dynakubeName}}) + + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, reconcile.Result{RequeueAfter: longRequeueDuration}, result) + + dynakubeMetadatas, err := db.GetAllDynakubes(ctx) + require.NoError(t, err) + require.Empty(t, dynakubeMetadatas) + }) + t.Run("host monitoring used", func(t *testing.T) { fakeClient := fake.NewClient( addFakeTenantUUID( &dynatracev1beta2.DynaKube{ @@ -156,8 +199,6 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) { mockDtcBuilder := dtbuildermock.NewBuilder(t) gc := reconcilermock.NewReconciler(t) - gc.Mock.On("Reconcile", mock.Anything, mock.Anything).Return(reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, nil) - db := metadata.FakeMemoryDB() provisioner := &OneAgentProvisioner{ @@ -169,15 +210,15 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) { path: metadata.PathResolver{}, dynatraceClientBuilder: mockDtcBuilder, } - result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}}) + result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}}) require.NoError(t, err) require.NotNil(t, result) - require.Equal(t, reconcile.Result{RequeueAfter: dtcsi.LongRequeueDuration}, result) + require.Equal(t, reconcile.Result{RequeueAfter: longRequeueDuration}, result) - tenantConfigs, err := db.ReadTenantConfigs() + dynakubeMetadatas, err := db.GetAllDynakubes(ctx) require.NoError(t, err) - require.Len(t, tenantConfigs, 1) + require.Len(t, dynakubeMetadatas, 1) }) t.Run("no tokens", func(t *testing.T) { gc := 
reconcilermock.NewReconciler(t)
@@ -208,11 +249,10 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) {
             db: metadata.FakeMemoryDB(),
             fs: afero.NewMemMapFs(),
         }
-        result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
+        result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
         require.EqualError(t, err, `secrets "`+dkName+`" not found`)
         require.NotNil(t, result)
-        require.Equal(t, reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, result)
     })
     t.Run("error when creating dynatrace client", func(t *testing.T) {
         gc := reconcilermock.NewReconciler(t)
@@ -258,11 +298,10 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) {
             db: metadata.FakeMemoryDB(),
             fs: afero.NewMemMapFs(),
         }
-        result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
+        result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
         require.EqualError(t, err, "failed to create Dynatrace client: "+errorMsg)
         require.NotNil(t, result)
-        require.Equal(t, reconcile.Result{RequeueAfter: dtcsi.ShortRequeueDuration}, result)
     })
     t.Run("error creating directories", func(t *testing.T) {
         gc := reconcilermock.NewReconciler(t)
@@ -299,7 +338,7 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) {
             db: metadata.FakeMemoryDB(),
             gc: gc,
         }
-        result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
+        result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
         require.EqualError(t, err, "failed to create directory "+tenantUUID+": "+errorMsg)
         require.NotNil(t, result)
@@ -355,7 +394,7 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) {
             urlInstallerBuilder: mockUrlInstallerBuilder(installerMock),
         }
-        result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
+        result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
         // "go test" breaks if the output does not end with a newline
         // making sure one is printed here
@@ -406,15 +445,13 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) {
             gc: gc,
         }
-        result, err := provisioner.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
+        result, err := provisioner.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
         require.Error(t, err)
         require.Empty(t, result)
     })
     t.Run("correct directories are created", func(t *testing.T) {
         gc := reconcilermock.NewReconciler(t)
-        gc.Mock.On("Reconcile", mock.Anything, mock.Anything).Return(reconcile.Result{}, nil)
-
         memFs := afero.NewMemMapFs()
         memDB := metadata.FakeMemoryDB()
         dynakube := addFakeTenantUUID(
@@ -438,7 +475,7 @@ func TestOneAgentProvisioner_Reconcile(t *testing.T) {
             gc: gc,
         }
-        result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
+        result, err := r.Reconcile(context.TODO(), reconcile.Request{NamespacedName: types.NamespacedName{Name: dkName}})
         require.NoError(t, err)
         require.NotNil(t, result)
@@ -501,95 +538,57 @@ func buildValidApplicationMonitoringSpec(_ *testing.T) *dynatracev1beta2.Applica
     }
 }
-func TestProvisioner_CreateTenantConfig(t *testing.T) {
+func TestProvisioner_CreateDynakube(t *testing.T) {
+    ctx := context.Background()
     db := metadata.FakeMemoryDB()
-
-    expectedOtherTenantConfig := metadata.TenantConfig{Name: otherDkName, TenantUUID: tenantUUID, DownloadedCodeModuleVersion: "v1", MaxFailedMountAttempts: 0}
-    db.CreateTenantConfig(&expectedOtherTenantConfig)
-
+    expectedOtherDynakube := metadata.NewDynakube(otherDkName, tenantUUID, "v1", "", 0)
+    _ = db.InsertDynakube(ctx, expectedOtherDynakube)
     provisioner := &OneAgentProvisioner{
         db: db,
     }
-    newTenantConfig := metadata.TenantConfig{Name: dkName, TenantUUID: tenantUUID, DownloadedCodeModuleVersion: "v1", MaxFailedMountAttempts: 0}
+    oldDynakube := metadata.Dynakube{}
+    newDynakube := metadata.NewDynakube(dkName, tenantUUID, "v1", "", 0)
-    err := provisioner.db.UpdateTenantConfig(&newTenantConfig)
+    err := provisioner.createOrUpdateDynakubeMetadata(ctx, oldDynakube, newDynakube)
     require.NoError(t, err)
-    storedTenantConfig, err := db.ReadTenantConfig(metadata.TenantConfig{Name: dkName})
+    dynakube, err := db.GetDynakube(ctx, dkName)
     require.NoError(t, err)
-    require.NotNil(t, storedTenantConfig)
+    require.NotNil(t, dynakube)
+    require.Equal(t, *newDynakube, *dynakube)
-    newTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    storedTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    require.Equal(t, newTenantConfig, *storedTenantConfig)
-
-    storedTenantConfig, err = db.ReadTenantConfig(metadata.TenantConfig{Name: otherDkName})
+    otherDynakube, err := db.GetDynakube(ctx, otherDkName)
     require.NoError(t, err)
-    require.NotNil(t, storedTenantConfig)
-
-    expectedOtherTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    storedTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    require.Equal(t, expectedOtherTenantConfig, *storedTenantConfig)
+    require.NotNil(t, dynakube)
+    require.Equal(t, *expectedOtherDynakube, *otherDynakube)
 }
 func TestProvisioner_UpdateDynakube(t *testing.T) {
+    ctx := context.Background()
     db := metadata.FakeMemoryDB()
-
-    oldTenantConfig := metadata.TenantConfig{Name: dkName, TenantUUID: tenantUUID, DownloadedCodeModuleVersion: "v1", MaxFailedMountAttempts: 0}
-    _ = db.CreateTenantConfig(&oldTenantConfig)
-    expectedOtherTenantConfig := metadata.TenantConfig{Name: otherDkName, TenantUUID: tenantUUID, DownloadedCodeModuleVersion: "v1", MaxFailedMountAttempts: 0}
-    _ = db.CreateTenantConfig(&expectedOtherTenantConfig)
+    oldDynakube := metadata.NewDynakube(dkName, tenantUUID, "v1", "", 0)
+    _ = db.InsertDynakube(ctx, oldDynakube)
+    expectedOtherDynakube := metadata.NewDynakube(otherDkName, tenantUUID, "v1", "", 0)
+    _ = db.InsertDynakube(ctx, expectedOtherDynakube)
     provisioner := &OneAgentProvisioner{
         db: db,
     }
-    newTenantConfig := metadata.TenantConfig{UID: oldTenantConfig.UID, Name: dkName, TenantUUID: "new-uuid", DownloadedCodeModuleVersion: "v2", MaxFailedMountAttempts: 0}
-
-    err := provisioner.db.UpdateTenantConfig(&newTenantConfig)
-    require.NoError(t, err)
-
-    tenantConfig, err := db.ReadTenantConfig(metadata.TenantConfig{Name: dkName})
-    require.NoError(t, err)
-    require.NotNil(t, tenantConfig)
-
-    newTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    tenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    require.Equal(t, newTenantConfig, *tenantConfig)
+    newDynakube := metadata.NewDynakube(dkName, "new-uuid", "v2", "", 0)
-    otherTenantConfig, err := db.ReadTenantConfig(metadata.TenantConfig{Name: otherDkName})
+    err := provisioner.createOrUpdateDynakubeMetadata(ctx, *oldDynakube, newDynakube)
     require.NoError(t, err)
-    require.NotNil(t, otherTenantConfig)
-    expectedOtherTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    otherTenantConfig.TimeStampedModel = metadata.TimeStampedModel{}
-    require.Equal(t, expectedOtherTenantConfig, *otherTenantConfig)
-}
-
-func TestHandleMetadata(t *testing.T) {
-    dynakube := addFakeTenantUUID(&dynatracev1beta2.DynaKube{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: dkName,
-        },
-        Spec: dynatracev1beta2.DynaKubeSpec{
-            APIURL: testAPIURL,
-        },
-    })
-    provisioner := &OneAgentProvisioner{
-        db: metadata.FakeMemoryDB(),
-    }
-    dynakubeMetadata, err := provisioner.handleMetadata(dynakube)
+    dynakube, err := db.GetDynakube(ctx, dkName)
     require.NoError(t, err)
-    require.NotNil(t, dynakubeMetadata)
-    require.Equal(t, int64(dynatracev1beta2.DefaultMaxFailedCsiMountAttempts), dynakubeMetadata.MaxFailedMountAttempts)
-
-    dynakube.Annotations = map[string]string{dynatracev1beta2.AnnotationFeatureMaxFailedCsiMountAttempts: "5"}
-    dynakubeMetadata, err = provisioner.handleMetadata(dynakube)
+    require.NotNil(t, dynakube)
+    require.Equal(t, *newDynakube, *dynakube)
+    otherDynakube, err := db.GetDynakube(ctx, otherDkName)
     require.NoError(t, err)
-    require.NotNil(t, dynakubeMetadata)
-    require.Equal(t, int64(5), dynakubeMetadata.MaxFailedMountAttempts)
+    require.NotNil(t, dynakube)
+    require.Equal(t, *expectedOtherDynakube, *otherDynakube)
 }
 func TestUpdateAgentInstallation(t *testing.T) {
@@ -607,11 +606,14 @@ func TestUpdateAgentInstallation(t *testing.T) {
         dtc, err := mockDtcBuilder.Build()
         require.NoError(t, err)
+        path := metadata.PathResolver{RootDir: "test"}
+        base64Image := base64.StdEncoding.EncodeToString([]byte(dynakube.CodeModulesImage()))
+        targetDir := path.AgentSharedBinaryDirForAgent(base64Image)
+
         mockK8sClient := createMockK8sClient(ctx, dynakube)
         installerMock := installermock.NewInstaller(t)
-        base64Image := base64.StdEncoding.EncodeToString([]byte(dynakube.CodeModulesImage()))
         installerMock.
-            On("InstallAgent", mock.AnythingOfType("*context.valueCtx"), "test/codemodules/"+base64Image).
+            On("InstallAgent", mock.AnythingOfType("*context.valueCtx"), targetDir).
             Return(true, nil)
         provisioner := &OneAgentProvisioner{
@@ -619,22 +621,23 @@ func TestUpdateAgentInstallation(t *testing.T) {
             dynatraceClientBuilder: mockDtcBuilder,
             apiReader: mockK8sClient,
             client: mockK8sClient,
-            path: metadata.PathResolver{RootDir: "test"},
+            path: path,
             fs: afero.NewMemMapFs(),
             imageInstallerBuilder: mockImageInstallerBuilder(installerMock),
             recorder: &record.FakeRecorder{},
         }
-        ruxitAgentProcPath := filepath.Join("test", "codemodules", base64Image, "agent", "conf", "ruxitagentproc.conf")
-        sourceRuxitAgentProcPath := filepath.Join("test", "codemodules", base64Image, "agent", "conf", "_ruxitagentproc.conf")
+        ruxitAgentProcPath := filepath.Join(targetDir, "agent", "conf", "ruxitagentproc.conf")
+        sourceRuxitAgentProcPath := filepath.Join(targetDir, "agent", "conf", "_ruxitagentproc.conf")
         setUpFS(provisioner.fs, ruxitAgentProcPath, sourceRuxitAgentProcPath)
-        tenantConfig := metadata.TenantConfig{Name: dkName, TenantUUID: tenantUUID, DownloadedCodeModuleVersion: agentVersion}
-        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &tenantConfig, dynakube)
+        dynakubeMetadata := metadata.Dynakube{TenantUUID: tenantUUID, LatestVersion: agentVersion, Name: dkName}
+        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &dynakubeMetadata, dynakube)
         require.NoError(t, err)
-        require.Equal(t, dynakube.CodeModulesImage(), tenantConfig.DownloadedCodeModuleVersion)
+        require.Equal(t, "", dynakubeMetadata.LatestVersion)
+        require.Equal(t, base64Image, dynakubeMetadata.ImageDigest)
         assert.False(t, isRequeue)
     })
     t.Run("updateAgentInstallation with codeModules enabled errors and requeues", func(t *testing.T) {
@@ -649,11 +652,14 @@ func TestUpdateAgentInstallation(t *testing.T) {
         dtc, err := mockDtcBuilder.Build()
         require.NoError(t, err)
+        path := metadata.PathResolver{RootDir: "test"}
         base64Image := base64.StdEncoding.EncodeToString([]byte(dynakube.CodeModulesImage()))
+        targetDir := path.AgentSharedBinaryDirForAgent(base64Image)
+
         mockK8sClient := createMockK8sClient(ctx, dynakube)
         installerMock := installermock.NewInstaller(t)
         installerMock.
-            On("InstallAgent", mock.AnythingOfType("*context.valueCtx"), "test/codemodules/"+base64Image).
+            On("InstallAgent", mock.AnythingOfType("*context.valueCtx"), targetDir).
             Return(true, nil)
         provisioner := &OneAgentProvisioner{
@@ -667,11 +673,12 @@ func TestUpdateAgentInstallation(t *testing.T) {
             recorder: &record.FakeRecorder{},
         }
-        tenantConfig := metadata.TenantConfig{TenantUUID: tenantUUID, DownloadedCodeModuleVersion: agentVersion, Name: dkName}
-        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &tenantConfig, dynakube)
+        dynakubeMetadata := metadata.Dynakube{TenantUUID: tenantUUID, LatestVersion: agentVersion, Name: dkName}
+        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &dynakubeMetadata, dynakube)
         require.NoError(t, err)
-        require.Equal(t, "12345", tenantConfig.DownloadedCodeModuleVersion)
+        require.Equal(t, "12345", dynakubeMetadata.LatestVersion)
+        require.Equal(t, "", dynakubeMetadata.ImageDigest)
         assert.True(t, isRequeue)
     })
     t.Run("updateAgentInstallation without codeModules", func(t *testing.T) {
@@ -706,13 +713,50 @@ func TestUpdateAgentInstallation(t *testing.T) {
         setUpFS(provisioner.fs, ruxitAgentProcPath, sourceRuxitAgentProcPath)
-        tenantConfig := metadata.TenantConfig{TenantUUID: tenantUUID, DownloadedCodeModuleVersion: agentVersion, Name: dkName}
-        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &tenantConfig, dynakube)
+        dynakubeMetadata := metadata.Dynakube{TenantUUID: tenantUUID, LatestVersion: agentVersion, Name: dkName}
+        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &dynakubeMetadata, dynakube)
         require.NoError(t, err)
-        require.Equal(t, "12345", tenantConfig.DownloadedCodeModuleVersion)
+        require.Equal(t, "12345", dynakubeMetadata.LatestVersion)
+        require.Equal(t, "", dynakubeMetadata.ImageDigest)
         assert.False(t, isRequeue)
     })
+    t.Run("updateAgentInstallation without codeModules errors and requeues", func(t *testing.T) {
+        dynakube := getDynakube()
+
+        mockDtcBuilder := dtbuildermock.NewBuilder(t)
+
+        var dtc dtclient.Client
+
+        mockDtcBuilder.On("Build").Return(dtc, nil)
+        dtc, err := mockDtcBuilder.Build()
+        require.NoError(t, err)
+
+        mockK8sClient := createMockK8sClient(ctx, dynakube)
+        installerMock := installermock.NewInstaller(t)
+        installerMock.
+            On("InstallAgent", mock.AnythingOfType("*context.valueCtx"), "test/codemodules").
+            Return(true, nil)
+
+        provisioner := &OneAgentProvisioner{
+            db: metadata.FakeMemoryDB(),
+            dynatraceClientBuilder: mockDtcBuilder,
+            apiReader: mockK8sClient,
+            client: mockK8sClient,
+            path: metadata.PathResolver{RootDir: "test"},
+            fs: afero.NewMemMapFs(),
+            recorder: &record.FakeRecorder{},
+            urlInstallerBuilder: mockUrlInstallerBuilder(installerMock),
+        }
+
+        dynakubeMetadata := metadata.Dynakube{TenantUUID: tenantUUID, LatestVersion: agentVersion, Name: dkName}
+        isRequeue, err := provisioner.updateAgentInstallation(ctx, dtc, &dynakubeMetadata, dynakube)
+        require.NoError(t, err)
+
+        require.Equal(t, "12345", dynakubeMetadata.LatestVersion)
+        require.Equal(t, "", dynakubeMetadata.ImageDigest)
+        assert.True(t, isRequeue)
+    })
 }
 func createMockK8sClient(ctx context.Context, dynakube *dynatracev1beta2.DynaKube) client.Client {
diff --git a/pkg/controllers/csi/provisioner/install.go b/pkg/controllers/csi/provisioner/install.go
index 1ca7cea415..0ae3cd11e1 100644
--- a/pkg/controllers/csi/provisioner/install.go
+++ b/pkg/controllers/csi/provisioner/install.go
@@ -21,34 +21,21 @@ func (provisioner *OneAgentProvisioner) installAgentImage(
     dynakube dynatracev1beta2.DynaKube,
     latestProcessModuleConfig *dtclient.ProcessModuleConfig,
 ) (
-    targetImage string,
-    err error,
+    string,
+    error,
 ) {
     tenantUUID, err := dynakube.TenantUUIDFromApiUrl()
     if err != nil {
         return "", err
     }
-    targetImage = dynakube.CodeModulesImage()
+    targetImage := dynakube.CodeModulesImage()
     // An image URI often contains one or several /-s, which is problematic when trying to use it as a folder name.
     // Easiest to just base64 encode it
     base64Image := base64.StdEncoding.EncodeToString([]byte(targetImage))
     targetDir := provisioner.path.AgentSharedBinaryDirForAgent(base64Image)
     targetConfigDir := provisioner.path.AgentConfigDir(tenantUUID, dynakube.GetName())
-    defer func() {
-        if err == nil {
-            err = processmoduleconfig.CreateAgentConfigDir(provisioner.fs, targetConfigDir, targetDir, latestProcessModuleConfig)
-        }
-    }()
-
-    codeModule, err := provisioner.db.ReadCodeModule(metadata.CodeModule{Version: targetImage})
-    if codeModule != nil {
-        log.Info("target image already downloaded", "image", targetImage, "path", targetDir)
-
-        return targetImage, nil
-    }
-
     props := &image.Properties{
         ImageUri: targetImage,
         ApiReader: provisioner.apiReader,
@@ -57,7 +44,7 @@ func (provisioner *OneAgentProvisioner) installAgentImage(
         Metadata: provisioner.db,
     }
-    imageInstaller, err := provisioner.imageInstallerBuilder(ctx, provisioner.fs, props)
+    imageInstaller, err := provisioner.imageInstallerBuilder(provisioner.fs, props)
     if err != nil {
         return "", err
     }
@@ -72,15 +59,12 @@ func (provisioner *OneAgentProvisioner) installAgentImage(
         return "", err
     }
-    err = provisioner.db.CreateCodeModule(&metadata.CodeModule{
-        Version: targetImage,
-        Location: targetDir,
-    })
+    err = processmoduleconfig.CreateAgentConfigDir(provisioner.fs, targetConfigDir, targetDir, latestProcessModuleConfig)
     if err != nil {
         return "", err
     }
-    return targetImage, err
+    return base64Image, err
 }
 func (provisioner *OneAgentProvisioner) installAgentZip(ctx context.Context, dynakube dynatracev1beta2.DynaKube, dtc dtclient.Client, latestProcessModuleConfig *dtclient.ProcessModuleConfig) (string, error) {
@@ -95,19 +79,6 @@ func (provisioner *OneAgentProvisioner) installAgentZip(ctx context.Context, dyn
     targetDir := provisioner.path.AgentSharedBinaryDirForAgent(targetVersion)
     targetConfigDir := provisioner.path.AgentConfigDir(tenantUUID, dynakube.GetName())
-    defer func() {
-        if err == nil {
-            err = processmoduleconfig.CreateAgentConfigDir(provisioner.fs, targetConfigDir, targetDir, latestProcessModuleConfig)
-        }
-    }()
-
-    codeModule, err := provisioner.db.ReadCodeModule(metadata.CodeModule{Version: targetVersion})
-    if codeModule != nil {
-        log.Info("target version already downloaded", "version", targetVersion, "path", targetDir)
-
-        return targetVersion, nil
-    }
-
     ctx, span := dtotel.StartSpan(ctx, csiotel.Tracer(), csiotel.SpanOptions()...)
     defer span.End()
@@ -118,10 +89,7 @@ func (provisioner *OneAgentProvisioner) installAgentZip(ctx context.Context, dyn
         return "", err
     }
-    err = provisioner.db.CreateCodeModule(&metadata.CodeModule{
-        Version: targetVersion,
-        Location: targetDir,
-    })
+    err = processmoduleconfig.CreateAgentConfigDir(provisioner.fs, targetConfigDir, targetDir, latestProcessModuleConfig)
     if err != nil {
         return "", err
     }
diff --git a/pkg/controllers/csi/provisioner/install_test.go b/pkg/controllers/csi/provisioner/install_test.go
index 7981e75fca..19737f0f35 100644
--- a/pkg/controllers/csi/provisioner/install_test.go
+++ b/pkg/controllers/csi/provisioner/install_test.go
@@ -36,6 +36,7 @@ const (
 func TestUpdateAgent(t *testing.T) {
     ctx := context.Background()
     testVersion := "test"
+    testImage := "my-image/1223:123"
     t.Run("zip install", func(t *testing.T) {
         dk := createTestDynaKubeWithZip(testVersion)
@@ -112,13 +113,12 @@ func TestUpdateAgent(t *testing.T) {
     })
     t.Run("failed install", func(t *testing.T) {
         dockerconfigjsonContent := `{"auths":{}}`
-        dk := createTestDynaKubeWithImage()
+        dk := createTestDynaKubeWithImage(testImage)
         provisioner := createTestProvisioner(createMockedPullSecret(dk, dockerconfigjsonContent))
         var revision uint = 3
         processModule := createTestProcessModuleConfig(revision)
-        base64Image := base64.StdEncoding.EncodeToString([]byte(dk.CodeModulesImage()))
-        targetDir := provisioner.path.AgentSharedBinaryDirForAgent(base64Image)
+        targetDir := provisioner.path.AgentSharedBinaryDirForAgent(base64.StdEncoding.EncodeToString([]byte(testImage)))
         installerMock := installermock.NewInstaller(t)
         installerMock.
             On("InstallAgent", mock.AnythingOfType("*context.valueCtx"), targetDir).
@@ -146,9 +146,9 @@ func TestUpdateAgent(t *testing.T) {
         var revision uint = 3
         processModule := createTestProcessModuleConfig(revision)
-        dk := createTestDynaKubeWithImage()
-        base64Image := base64.StdEncoding.EncodeToString([]byte(dk.CodeModulesImage()))
+        dk := createTestDynaKubeWithImage(testImage)
         provisioner := createTestProvisioner(createMockedPullSecret(dk, dockerconfigjsonContent))
+        base64Image := base64.StdEncoding.EncodeToString([]byte(testImage))
         targetDir := provisioner.path.AgentSharedBinaryDirForAgent(base64Image)
         installerMock := installermock.NewInstaller(t)
         installerMock.
@@ -159,7 +159,7 @@ func TestUpdateAgent(t *testing.T) {
         currentVersion, err := provisioner.installAgentImage(ctx, dk, processModule)
         require.NoError(t, err)
-        assert.Equal(t, dk.CodeModulesImage(), currentVersion)
+        assert.Equal(t, base64Image, currentVersion)
     })
     t.Run("codeModulesImage set with custom pull secret", func(t *testing.T) {
         pullSecretName := "test-pull-secret"
@@ -168,11 +168,11 @@ func TestUpdateAgent(t *testing.T) {
         var revision uint = 3
         processModule := createTestProcessModuleConfig(revision)
-        dk := createTestDynaKubeWithImage()
+        dk := createTestDynaKubeWithImage(testImage)
         dk.Spec.CustomPullSecret = pullSecretName
         provisioner := createTestProvisioner(createMockedPullSecret(dk, dockerconfigjsonContent))
-        base64Image := base64.StdEncoding.EncodeToString([]byte(dk.CodeModulesImage()))
+        base64Image := base64.StdEncoding.EncodeToString([]byte(testImage))
         targetDir := provisioner.path.AgentSharedBinaryDirForAgent(base64Image)
         installerMock := installermock.NewInstaller(t)
         installerMock.
@@ -183,7 +183,7 @@ func TestUpdateAgent(t *testing.T) {
         currentVersion, err := provisioner.installAgentImage(ctx, dk, processModule)
         require.NoError(t, err)
-        assert.Equal(t, dk.CodeModulesImage(), currentVersion)
+        assert.Equal(t, base64Image, currentVersion)
     })
     t.Run("codeModulesImage + trustedCA set", func(t *testing.T) {
         pullSecretName := "test-pull-secret"
@@ -216,12 +216,12 @@ NK85cEJwyxQ+wahdNGUD
         var revision uint = 3
         processModule := createTestProcessModuleConfig(revision)
-        dk := createTestDynaKubeWithImage()
+        dk := createTestDynaKubeWithImage(testImage)
         dk.Spec.CustomPullSecret = pullSecretName
         dk.Spec.TrustedCAs = trustedCAName
         provisioner := createTestProvisioner(createMockedPullSecret(dk, dockerconfigjsonContent), createMockedCAConfigMap(dk, customCertContent))
-        base64Image := base64.StdEncoding.EncodeToString([]byte(dk.CodeModulesImage()))
+        base64Image := base64.StdEncoding.EncodeToString([]byte(testImage))
         targetDir := provisioner.path.AgentSharedBinaryDirForAgent(base64Image)
         installerMock := installermock.NewInstaller(t)
         installerMock.
@@ -232,7 +232,7 @@ NK85cEJwyxQ+wahdNGUD
         currentVersion, err := provisioner.installAgentImage(ctx, dk, processModule)
         require.NoError(t, err)
-        assert.Equal(t, dk.CodeModulesImage(), currentVersion)
+        assert.Equal(t, base64Image, currentVersion)
     })
 }
@@ -270,9 +270,7 @@ func createMockedCAConfigMap(dynakube dynatracev1beta2.DynaKube, certContent str
     }
 }
-func createTestDynaKubeWithImage() dynatracev1beta2.DynaKube {
-    imageID := "some.registry.com/image:1.234.345"
-
+func createTestDynaKubeWithImage(image string) dynatracev1beta2.DynaKube {
     return *addFakeTenantUUID(&dynatracev1beta2.DynaKube{
         ObjectMeta: metav1.ObjectMeta{
             Name: "test-dk",
@@ -284,7 +282,7 @@ func createTestDynaKubeWithImage(image string) dynatracev1beta2.DynaKube {
         Status: dynatracev1beta2.DynaKubeStatus{
             CodeModules: dynatracev1beta2.CodeModulesStatus{
                 VersionStatus: status.VersionStatus{
-                    ImageID: imageID,
+                    ImageID: image,
                 },
             },
         },
@@ -330,7 +328,7 @@ func createTestProvisioner(obj ...client.Object) *OneAgentProvisioner {
 }
 func mockImageInstallerBuilder(mock *installermock.Installer) imageInstallerBuilder {
-    return func(_ context.Context, _ afero.Fs, _ *image.Properties) (installer.Installer, error) {
+    return func(_ afero.Fs, _ *image.Properties) (installer.Installer, error) {
         return mock, nil
     }
 }
diff --git a/pkg/injection/codemodule/installer/image/installer.go b/pkg/injection/codemodule/installer/image/installer.go
index 27b63570dd..a4486faeb4 100644
--- a/pkg/injection/codemodule/installer/image/installer.go
+++ b/pkg/injection/codemodule/installer/image/installer.go
@@ -29,7 +29,8 @@ type Properties struct {
     ImageDigest string
 }
-func NewImageInstaller(ctx context.Context, fs afero.Fs, props *Properties) (installer.Installer, error) {
+func NewImageInstaller(fs afero.Fs, props *Properties) (installer.Installer, error) {
+    ctx := context.TODO()
     pullSecret := props.Dynakube.PullSecretWithoutData()
     defaultTransport := http.DefaultTransport.(*http.Transport).Clone()
@@ -64,7 +65,7 @@ func (installer *Installer) InstallAgent(_ context.Context, targetDir string) (b
     log.Info("installing agent from image")
     if installer.isAlreadyPresent(targetDir) {
-        log.Info("agent already installed", "target dir", targetDir)
+        log.Info("agent already installed", "image", installer.props.ImageUri, "target dir", targetDir)
         return false, nil
     }
@@ -76,7 +77,7 @@ func (installer *Installer) InstallAgent(_ context.Context, targetDir string) (b
         return false, errors.WithStack(err)
     }
-    log.Info("installing agent", "target dir", targetDir)
+    log.Info("installing agent", "image", installer.props.ImageUri, "target dir", targetDir)
     if err := installer.installAgentFromImage(targetDir); err != nil {
         _ = installer.fs.RemoveAll(targetDir)
diff --git a/pkg/injection/codemodule/installer/image/installer_test.go b/pkg/injection/codemodule/installer/image/installer_test.go
index baf585d8c8..7c122d83b1 100644
--- a/pkg/injection/codemodule/installer/image/installer_test.go
+++ b/pkg/injection/codemodule/installer/image/installer_test.go
@@ -60,7 +60,6 @@ func testFileSystemWithSharedDirPresent(pathResolver metadata.PathResolver, imag
 }
 func TestNewImageInstaller(t *testing.T) {
-    ctx := context.Background()
     testFS := afero.NewMemMapFs()
     dynakube := &dynatracev1beta2.DynaKube{
         ObjectMeta: metav1.ObjectMeta{
@@ -82,7 +81,7 @@ func TestNewImageInstaller(t *testing.T) {
         ImageDigest: testImageDigest,
         ApiReader: fakeClient,
     }
-    in, err := NewImageInstaller(ctx, testFS, props)
+    in, err := NewImageInstaller(testFS, props)
     require.NoError(t, err)
     assert.NotNil(t, in)
     assert.NotNil(t, in)
diff --git a/pkg/injection/codemodule/installer/image/unpack.go b/pkg/injection/codemodule/installer/image/unpack.go
index 337689c654..b07560bd7a 100644
--- a/pkg/injection/codemodule/installer/image/unpack.go
+++ b/pkg/injection/codemodule/installer/image/unpack.go
@@ -2,7 +2,6 @@ package image
 import (
     "context"
-    "encoding/base64"
     "fmt"
     "path"
     "path/filepath"
@@ -75,10 +74,7 @@ func (installer *Installer) pullOCIimage(image containerv1.Image, imageName stri
         return errors.WithStack(err)
     }
-    // ref.String() is consistent with what the user gave, ref.Name() could add some prefix depending on the situation.
-    // It doesn't really matter here as it's only for a temporary dir, but it's still better be consistent.
-    imageCachePath := path.Join(imageCacheDir, base64.StdEncoding.EncodeToString([]byte(ref.String())))
-    if err := crane.SaveOCI(image, imageCachePath); err != nil {
+    if err := crane.SaveOCI(image, path.Join(imageCacheDir, ref.Identifier())); err != nil {
         log.Info("saving v1.Image img as an OCI Image Layout at path", imageCacheDir, err)
         return errors.WithMessagef(err, "saving v1.Image img as an OCI Image Layout at path %s", imageCacheDir)
@@ -91,7 +87,7 @@ func (installer *Installer) pullOCIimage(image containerv1.Image, imageName stri
         return errors.WithStack(err)
     }
-    err = installer.unpackOciImage(layers, imageCachePath, targetDir)
+    err = installer.unpackOciImage(layers, filepath.Join(imageCacheDir, ref.Identifier()), targetDir)
     if err != nil {
         log.Info("failed to unpackOciImage", "error", err)
diff --git a/pkg/util/testing/partial_equal.go b/pkg/util/testing/partial_equal.go
deleted file mode 100644
index 19b6cf809a..0000000000
--- a/pkg/util/testing/partial_equal.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package testing
-
-import (
-    "fmt"
-
-    "github.com/google/go-cmp/cmp"
-    "github.com/stretchr/testify/assert"
-    "github.com/stretchr/testify/require"
-)
-
-type tHelper interface {
-    Helper()
-}
-
-// PartialEqual asserts that two objects are equal, depending on what equal means
-//
-// For instance, you may pass options to ignore certain fields
-func PartialEqual(t require.TestingT, expected, actual any, diffOpts cmp.Option, msgAndArgs ...any) {
-    if h, ok := t.(tHelper); ok {
-        h.Helper()
-    }
-
-    if cmp.Equal(expected, actual, diffOpts) {
-        return
-    }
-
-    diff := cmp.Diff(expected, actual, diffOpts)
-    assert.Fail(t, fmt.Sprintf("Not equal: \n"+
-        "expected: %s\n"+
-        "actual : %s%s", expected, actual, diff), msgAndArgs...)
-}
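
Note on the directory naming kept by install.go above: the code-modules image URI is base64-encoded before it is used as a folder name, because the URI contains "/" and ":" characters. A minimal, self-contained sketch of that step follows; the root directory layout and the helper name are illustrative assumptions, not operator code.

package main

import (
	"encoding/base64"
	"fmt"
	"path/filepath"
)

// sharedBinaryDirForImage mirrors the idea used in installAgentImage:
// an image URI such as "registry.example.com/codemodules:1.2.3" cannot be
// used directly as a directory name, so it is base64-encoded first.
// The "codemodules" sub-directory here is a simplified assumption.
func sharedBinaryDirForImage(rootDir, imageURI string) string {
	base64Image := base64.StdEncoding.EncodeToString([]byte(imageURI))

	return filepath.Join(rootDir, "codemodules", base64Image)
}

func main() {
	fmt.Println(sharedBinaryDirForImage("/data", "registry.example.com/codemodules:1.2.3"))
}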
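Note on the removed defer blocks in install.go: the previous code relied on a named err result so that a deferred closure could still attach the CreateAgentConfigDir error after the body returned; once the defer is gone, plain unnamed (string, error) results are enough again. A generic sketch of that Go pattern, with finishUp as a hypothetical stand-in for the deferred work:

package main

import (
	"errors"
	"fmt"
)

// withDeferredFinish illustrates the pattern the removed code used: because
// "err" is a *named* return value, a deferred closure can still replace it
// after the function body has produced its result.
func withDeferredFinish() (result string, err error) {
	defer func() {
		if err == nil {
			err = finishUp() // runs only when the body itself succeeded
		}
	}()

	return "payload", nil
}

// finishUp stands in for the deferred follow-up step (e.g. writing a config dir).
func finishUp() error {
	return errors.New("config dir could not be created")
}

func main() {
	result, err := withDeferredFinish()
	fmt.Println(result, err) // the deferred closure has overwritten the nil error
}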
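Note on the unpack.go cache path: the revert keys the temporary OCI layout by ref.Identifier() (the tag or digest) instead of a base64-encoded ref.String(). A small sketch of deriving such a path with go-containerregistry's name package; the cache directory value is illustrative only.

package main

import (
	"fmt"
	"path"

	"github.com/google/go-containerregistry/pkg/name"
)

// cacheDirFor returns the per-image cache directory, keyed by the reference
// identifier (tag or digest), matching the naming scheme unpack.go goes back to.
func cacheDirFor(imageCacheDir, imageName string) (string, error) {
	ref, err := name.ParseReference(imageName)
	if err != nil {
		return "", err
	}

	return path.Join(imageCacheDir, ref.Identifier()), nil
}

func main() {
	dir, err := cacheDirFor("/tmp/image-cache", "registry.example.com/codemodules:1.2.3")
	if err != nil {
		panic(err)
	}

	fmt.Println(dir) // /tmp/image-cache/1.2.3
}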