diff --git a/.circleci/config.yml b/.circleci/config.yml index b2d2acb077..820fade5a4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -59,7 +59,7 @@ steps_prepare_testing_k8s_k3s: &steps_prepare_testing_k8s_k3s type: kubernetes config: kubernetes-cluster-name: k3d-k3s-default - storage-class: standard + storage-class: local-path enclave-size-in-megabytes: 2048 EOF # Set up the K3S cluster with some previous steps to force this cluster type in the cluster-setting file. This saves us from having to start the engine with the cluster set command diff --git a/cli/cli/kurtosis_config/resolved_config/kurtosis_cluster_config.go b/cli/cli/kurtosis_config/resolved_config/kurtosis_cluster_config.go index 3cc337a51f..1088d8e49e 100644 --- a/cli/cli/kurtosis_config/resolved_config/kurtosis_cluster_config.go +++ b/cli/cli/kurtosis_config/resolved_config/kurtosis_cluster_config.go @@ -145,7 +145,7 @@ func getSuppliers(clusterId string, clusterType KurtosisClusterType, kubernetesC } backendSupplier = func(ctx context.Context) (backend_interface.KurtosisBackend, error) { - backend, err := kubernetes_kurtosis_backend.GetCLIBackend(ctx) + backend, err := kubernetes_kurtosis_backend.GetCLIBackend(ctx, *kubernetesConfig.StorageClass) if err != nil { return nil, stacktrace.Propagate( err, diff --git a/cli/cli/kurtosis_gateway/connection/provider.go b/cli/cli/kurtosis_gateway/connection/provider.go index fbe5bf4e23..fa74585772 100644 --- a/cli/cli/kurtosis_gateway/connection/provider.go +++ b/cli/cli/kurtosis_gateway/connection/provider.go @@ -22,6 +22,8 @@ import ( const ( grpcPortIdStr = "grpc" httpApplicationProtocol = "http" + // This doesn't have any effect, as this is just the gateway + emptyStorageClassName = "" ) var noWait *port_spec.Wait = nil @@ -42,7 +44,7 @@ func NewGatewayConnectionProvider(ctx context.Context, kubernetesConfig *restcli if err != nil { return nil, stacktrace.Propagate(err, "Expected to be able to get config for Kubernetes client set, instead a non nil error was returned") } - kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig) + kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig, emptyStorageClassName) return &GatewayConnectionProvider{ config: kubernetesConfig, diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend.go index ee58651bfd..0ddf22100a 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend.go @@ -67,8 +67,9 @@ func NewAPIContainerKubernetesKurtosisBackend( kubernetesManager *kubernetes_manager.KubernetesManager, ownEnclaveUuid enclave.EnclaveUUID, ownNamespaceName string, + storageClassName string, ) *KubernetesKurtosisBackend { - modeArgs := shared_helpers.NewApiContainerModeArgs(ownEnclaveUuid, ownNamespaceName) + modeArgs := shared_helpers.NewApiContainerModeArgs(ownEnclaveUuid, ownNamespaceName, storageClassName) return newKubernetesKurtosisBackend( kubernetesManager, nil, diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_enclave_functions.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_enclave_functions.go index
c651cb4480..84611e6aaa 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_enclave_functions.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_enclave_functions.go @@ -36,8 +36,6 @@ type enclaveKubernetesResources struct { // StopEnclave services []apiv1.Service - persistentVolumes []apiv1.PersistentVolume - clusterRoles []rbacv1.ClusterRole clusterRoleBindings []rbacv1.ClusterRoleBinding @@ -101,7 +99,6 @@ func (backend *KubernetesKurtosisBackend) CreateEnclave( namespace: enclaveNamespace, pods: []apiv1.Pod{}, services: nil, - persistentVolumes: []apiv1.PersistentVolume{}, clusterRoles: []rbacv1.ClusterRole{}, clusterRoleBindings: []rbacv1.ClusterRoleBinding{}, } @@ -299,21 +296,6 @@ func (backend *KubernetesKurtosisBackend) DestroyEnclaves( } } - // Remove persistent volume - if resources.persistentVolumes != nil { - for _, persistentVolume := range resources.persistentVolumes { - if err := backend.kubernetesManager.RemovePersistentVolume(ctx, persistentVolume.Name); err != nil { - erroredEnclaveIds[enclaveId] = stacktrace.Propagate( - err, - "An error occurred removing persistent volume '%v' for enclave '%v'", - persistentVolume.Name, - enclaveId, - ) - continue - } - } - } - // Remove custom API container Cluster Role Bindings if resources.clusterRoleBindings != nil { for _, clusterRoleBinding := range resources.clusterRoleBindings { @@ -519,7 +501,7 @@ func (backend *KubernetesKurtosisBackend) createGetEnclaveResourcesOperation( } // Pods and Services - podsList, servicesList, persistentVolumesList, clusterRolesList, clusterRoleBindingsList, err := backend.kubernetesManager.GetAllEnclaveResourcesByLabels( + podsList, servicesList, clusterRolesList, clusterRoleBindingsList, err := backend.kubernetesManager.GetAllEnclaveResourcesByLabels( ctx, namespaceName, enclaveWithIDMatchLabels, @@ -534,9 +516,6 @@ func (backend *KubernetesKurtosisBackend) createGetEnclaveResourcesOperation( var services []apiv1.Service services = append(services, servicesList.Items...) - var persistentVolumes []apiv1.PersistentVolume - persistentVolumes = append(persistentVolumes, persistentVolumesList.Items...) - var clusterRoles []rbacv1.ClusterRole clusterRoles = append(clusterRoles, clusterRolesList.Items...) 
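Note: the removals in this file reflect the core shift in this PR, away from statically provisioned HostPath PersistentVolumes and toward dynamically provisioned ones. Once a PersistentVolumeClaim names a StorageClass, the cluster's provisioner creates the backing PersistentVolume and garbage-collects it according to the class's reclaim policy, so the backend no longer has to track or delete PV objects itself; deleting the enclave namespace deletes the namespaced claims, and the provisioner presumably cleans up the volumes from there. A minimal client-go sketch of such a claim follows; the package, helper name, and parameters are illustrative, not Kurtosis code:

```go
package example

import (
	"context"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createDynamicClaim shows the shape of a dynamically provisioned claim:
// StorageClassName comes from configuration, and VolumeName is left empty
// so the provisioner picks (and creates) the backing volume.
func createDynamicClaim(ctx context.Context, clientSet *kubernetes.Clientset, namespace string, claimName string, storageClass string, sizeBytes int64) (*apiv1.PersistentVolumeClaim, error) {
	claim := &apiv1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: claimName,
		},
		Spec: apiv1.PersistentVolumeClaimSpec{
			AccessModes: []apiv1.PersistentVolumeAccessMode{apiv1.ReadWriteOnce},
			// Resources uses apiv1.ResourceRequirements to match the client-go
			// generation this PR builds against; newer k8s.io/api versions use
			// VolumeResourceRequirements here.
			Resources: apiv1.ResourceRequirements{
				Requests: apiv1.ResourceList{
					apiv1.ResourceStorage: *resource.NewQuantity(sizeBytes, resource.BinarySI),
				},
			},
			// No VolumeName: binding is delegated to the storage class's provisioner.
			StorageClassName: &storageClass,
		},
	}
	return clientSet.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
}
```

With a WaitForFirstConsumer storage class (k3s's bundled local-path class, selected in the CircleCI change above, typically behaves this way) the claim stays Pending until a pod mounts it, which is presumably why the waitForPersistentVolumeClaimBinding poll is deleted further down rather than kept around.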
@@ -547,7 +526,6 @@ func (backend *KubernetesKurtosisBackend) createGetEnclaveResourcesOperation( namespace: namespace, pods: pods, services: services, - persistentVolumes: persistentVolumes, clusterRoles: clusterRoles, clusterRoleBindings: clusterRoleBindings, } diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_helper.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_helper.go index e079dce607..68c3c4c124 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_helper.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/kubernetes_kurtosis_backend_helper.go @@ -21,7 +21,7 @@ var kubeConfigFileFilepath = filepath.Join( os.Getenv("HOME"), ".kube", "config", ) -func GetCLIBackend(ctx context.Context) (backend_interface.KurtosisBackend, error) { +func GetCLIBackend(ctx context.Context, storageClass string) (backend_interface.KurtosisBackend, error) { kubernetesConfig, err := clientcmd.BuildConfigFromFlags(emptyMasterURL, kubeConfigFileFilepath) if err != nil { return nil, stacktrace.Propagate(err, "An error occurred creating kubernetes configuration from flags in file '%v'", kubeConfigFileFilepath) @@ -35,6 +35,7 @@ func GetCLIBackend(ctx context.Context) (backend_interface.KurtosisBackend, erro ctx, kubernetesConfig, backendSupplier, + storageClass, ) if err != nil { return nil, stacktrace.Propagate(err, "An error occurred wrapping the CLI Kubernetes backend") @@ -44,7 +45,7 @@ func GetCLIBackend(ctx context.Context) (backend_interface.KurtosisBackend, erro } func GetEngineServerBackend( - ctx context.Context, + ctx context.Context, storageClass string, ) (backend_interface.KurtosisBackend, error) { kubernetesConfig, err := rest.InClusterConfig() if err != nil { @@ -61,6 +62,7 @@ func GetEngineServerBackend( ctx, kubernetesConfig, backendSupplier, + storageClass, ) if err != nil { return nil, stacktrace.Propagate(err, "An error occurred wrapping the Kurtosis Engine Kubernetes backend") @@ -71,6 +73,7 @@ func GetEngineServerBackend( func GetApiContainerBackend( ctx context.Context, + storageClass string, ) (backend_interface.KurtosisBackend, error) { kubernetesConfig, err := rest.InClusterConfig() if err != nil { @@ -103,6 +106,7 @@ func GetApiContainerBackend( kubernetesManager, enclaveId, namespaceName, + storageClass, ), nil } @@ -110,6 +114,7 @@ func GetApiContainerBackend( ctx, kubernetesConfig, backendSupplier, + storageClass, ) if err != nil { return nil, stacktrace.Propagate(err, "An error occurred wrapping the APIC Kubernetes backend") @@ -127,13 +132,14 @@ func getWrappedKubernetesKurtosisBackend( ctx context.Context, kubernetesConfig *rest.Config, kurtosisBackendSupplier func(context.Context, *kubernetes_manager.KubernetesManager) (*KubernetesKurtosisBackend, error), + storageClass string, ) (*metrics_reporting.MetricsReportingKurtosisBackend, error) { clientSet, err := kubernetes.NewForConfig(kubernetesConfig) if err != nil { return nil, stacktrace.Propagate(err, "Expected to be able to create kubernetes client set using Kubernetes config '%+v', instead a non nil error was returned", kubernetesConfig) } - kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig) + kubernetesManager := kubernetes_manager.NewKubernetesManager(clientSet, kubernetesConfig, storageClass) kubernetesBackend, err := kurtosisBackendSupplier(ctx, 
kubernetesManager) if err != nil { diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/shared_helpers/shared_helpers.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/shared_helpers/shared_helpers.go index a79f054d82..72a70ba9ee 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/shared_helpers/shared_helpers.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/shared_helpers/shared_helpers.go @@ -102,11 +102,11 @@ type dumpPodResult struct { func NewApiContainerModeArgs( ownEnclaveId enclave.EnclaveUUID, - ownNamespaceName string) *ApiContainerModeArgs { + ownNamespaceName string, storageClassName string) *ApiContainerModeArgs { return &ApiContainerModeArgs{ ownEnclaveId: ownEnclaveId, ownNamespaceName: ownNamespaceName, - storageClassName: "", + storageClassName: storageClassName, filesArtifactExpansionVolumeSizeInMegabytes: 0, } } diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/persistent_directories.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/persistent_directories.go index 37626e9dbf..ef33c2dd2c 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/persistent_directories.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/persistent_directories.go @@ -12,14 +12,12 @@ import ( ) type kubernetesVolumeWithClaim struct { - VolumeName string - VolumeClaimName string } func (volumeAndClaim *kubernetesVolumeWithClaim) GetVolume() *apiv1.Volume { return &apiv1.Volume{ - Name: volumeAndClaim.VolumeName, + Name: volumeAndClaim.VolumeClaimName, VolumeSource: apiv1.VolumeSource{ HostPath: nil, EmptyDir: nil, @@ -59,7 +57,7 @@ func (volumeAndClaim *kubernetesVolumeWithClaim) GetVolume() *apiv1.Volume { func (volumeAndClaim *kubernetesVolumeWithClaim) GetVolumeMount(mountPath string) *apiv1.VolumeMount { return &apiv1.VolumeMount{ - Name: volumeAndClaim.VolumeName, + Name: volumeAndClaim.VolumeClaimName, ReadOnly: false, MountPath: mountPath, SubPath: "", @@ -77,7 +75,6 @@ func preparePersistentDirectoriesResources( kubernetesManager *kubernetes_manager.KubernetesManager, ) (map[string]*kubernetesVolumeWithClaim, error) { shouldDeleteVolumesAndClaimsCreated := true - volumesCreated := map[string]*apiv1.PersistentVolume{} volumeClaimsCreated := map[string]*apiv1.PersistentVolumeClaim{} persistentVolumesAndClaims := map[string]*kubernetesVolumeWithClaim{} @@ -96,16 +93,7 @@ func preparePersistentDirectoriesResources( persistentVolumeSize := int64(persistentDirectory.Size) - var persistentVolume *apiv1.PersistentVolume - if persistentVolume, err = kubernetesManager.GetPersistentVolume(ctx, volumeName); err != nil { - persistentVolume, err = kubernetesManager.CreatePersistentVolume(ctx, namespace, volumeName, volumeLabelsStrs, persistentVolumeSize) - if err != nil { - return nil, stacktrace.Propagate(err, "An error occurred creating the persistent volume for '%s'", persistentDirectory.PersistentKey) - } - volumesCreated[persistentVolume.Name] = persistentVolume - } - - // For now, we have a 1:1 mapping between volume and volume claims, so it's fine giving it the same name + // This claim works with a dynamic driver - it will spin up its own volume, and the volume will get deleted when said claim is deleted var
persistentVolumeClaim *apiv1.PersistentVolumeClaim if persistentVolumeClaim, err = kubernetesManager.GetPersistentVolumeClaim(ctx, namespace, volumeName); err != nil { persistentVolumeClaim, err = kubernetesManager.CreatePersistentVolumeClaim(ctx, namespace, volumeName, volumeLabelsStrs, persistentVolumeSize) @@ -116,7 +104,6 @@ func preparePersistentDirectoriesResources( } persistentVolumesAndClaims[dirPath] = &kubernetesVolumeWithClaim{ - VolumeName: persistentVolume.Name, VolumeClaimName: persistentVolumeClaim.Name, } } @@ -136,17 +123,6 @@ func preparePersistentDirectoriesResources( logrus.Warnf("You'll need to clean up volume claim '%v' manually!", volumeClaimNameStr) } } - for volumeNameStr := range volumesCreated { - // Background context so we still run this even if the input context was cancelled - if err := kubernetesManager.RemovePersistentVolumeClaim(context.Background(), namespace, volumeNameStr); err != nil { - logrus.Warnf( - "Creating persistent directory volumes didn't complete successfully so we tried to delete volume '%v' that we created, but doing so threw an error:\n%v", - volumeNameStr, - err, - ) - logrus.Warnf("You'll need to clean up volume '%v' manually!", volumeNameStr) - } - } }() shouldDeleteVolumesAndClaimsCreated = false diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/start_user_services.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/start_user_services.go index aae79f02c3..1a09e97937 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/start_user_services.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_kurtosis_backend/user_services_functions/start_user_services.go @@ -372,15 +372,10 @@ func createStartServiceOperation( } for _, volumeAndClaim := range createVolumesWithClaims { volumeClaimName := volumeAndClaim.VolumeClaimName - volumeName := volumeAndClaim.VolumeName if err := kubernetesManager.RemovePersistentVolumeClaim(ctx, namespaceName, volumeClaimName); err != nil { logrus.Errorf("Starting service didn't complete successfully so we tried to remove the persistent volume claim we created but doing so threw an error:\n%v", err) logrus.Errorf("ACTION REQUIRED: You'll need to remove persistent volume claim '%v' in '%v' manually!!!", volumeClaimName, namespaceName) } - if err := kubernetesManager.RemovePersistentVolume(ctx, volumeAndClaim.VolumeName); err != nil { - logrus.Errorf("Starting service didn't complete successfully so we tried to remove the persistent volume we created but doing so threw an error:\n%v", err) - logrus.Errorf("ACTION REQUIRED: You'll need to remove persistent volume '%v' manually!!!", volumeName) - } } }() diff --git a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_manager/kubernetes_manager.go b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_manager/kubernetes_manager.go index d8fbd883c6..b1831e8c08 100644 --- a/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_manager/kubernetes_manager.go +++ b/container-engine-lib/lib/backend_impls/kubernetes/kubernetes_manager/kubernetes_manager.go @@ -14,7 +14,6 @@ import ( "net/http" "net/url" "os" - "path" "strconv" "strings" "sync" @@ -72,13 +71,6 @@ const ( listOptionsTimeoutSeconds int64 = 10 contextDeadlineExceeded = "context deadline exceeded" expectedStatusMessageSliceSize = 6 - - volumeHostPathRootDirectory = 
"/kurtosis-persistent-service-data" - // TODO: Maybe pipe this to Starlark to let users choose the size of their persistent directories - // The difficulty is that Docker doesn't have such a feature, so we would need somehow to hack it - waitForPersistentVolumeBoundTimeout = 30 * time.Second - waitForPersistentVolumeBoundInitialDelayMilliSeconds = 100 - waitForPersistentVolumeBoundRetriesDelayMilliSeconds = 500 ) // We'll try to use the nicer-to-use shells first before we drop down to the lower shells @@ -93,9 +85,8 @@ var commandToRunWhenCreatingUserServiceShell = []string{ } var ( - volumeStorageClassName = "kurtosis-local-storage" - globalDeletePolicy = metav1.DeletePropagationForeground - globalDeleteOptions = metav1.DeleteOptions{ + globalDeletePolicy = metav1.DeletePropagationForeground + globalDeleteOptions = metav1.DeleteOptions{ TypeMeta: metav1.TypeMeta{ Kind: "", APIVersion: "", @@ -146,14 +137,17 @@ type KubernetesManager struct { kubernetesClientSet *kubernetes.Clientset // Underlying restClient configuration kuberneteRestConfig *rest.Config + // The storage class name as specified in the `kurtosis-config.yaml` + storageClass string } func int64Ptr(i int64) *int64 { return &i } -func NewKubernetesManager(kubernetesClientSet *kubernetes.Clientset, kuberneteRestConfig *rest.Config) *KubernetesManager { +func NewKubernetesManager(kubernetesClientSet *kubernetes.Clientset, kuberneteRestConfig *rest.Config, storageClass string) *KubernetesManager { return &KubernetesManager{ kubernetesClientSet: kubernetesClientSet, kuberneteRestConfig: kuberneteRestConfig, + storageClass: storageClass, } } @@ -327,142 +321,6 @@ func (manager *KubernetesManager) GetIngressesByLabels(ctx context.Context, name // ---------------------------Volumes------------------------------------------------------------------------------ -func (manager *KubernetesManager) CreatePersistentVolume( - ctx context.Context, - namespace string, - volumeName string, - labels map[string]string, - requiredSize int64, -) (*apiv1.PersistentVolume, error) { - if requiredSize == 0 { - return nil, stacktrace.NewError("Cannot create volume '%v' of size 0; need size greater than 0", volumeName) - } - - volumesClient := manager.kubernetesClientSet.CoreV1().PersistentVolumes() - - // Check that there's only one node, otherwise using HostPath volumes will not work. - // TODO: Support Persistent Volumes on Kubernetes with multiple nodes. We have a few options: - // - We make sure the pod restarting gets rescheduled on the same node, which would be quite easy but defeats a - // little bit the power of using k8s to balance load among nodes - // - We can use k8s' `local` persistent volumes. However, those do not support dynamic provisioning. They require - // the directory to already exist on the host. There's some k8s extensions to have them support dynamic provisioning - // but we can't assume all Kubernetes cluster users will use will have those extensions. At least for now - // - If this use case is hit only in the cloud (which is quite likely since having a multi-nodes k8s cluster running - // outside a cloud provider infra is quite rare), then maybe we should just use whatever the cloud provider has, - // like EBS for AWS for example. 
Those support dynamic provisioning and everything via their respective CSI drivers - listOptions := buildListOptionsFromLabels(map[string]string{}) - nodes, err := manager.kubernetesClientSet.CoreV1().Nodes().List(ctx, listOptions) - if err != nil { - return nil, stacktrace.Propagate(err, "An unexpected error occurred retrieving the list of Kubernetes nodes.") - } else if len(nodes.Items) > 1 { - return nil, stacktrace.NewError("Using persistent volumes on Kubernetes with multiple nodes is currently " + - "not supported. Reach out to Kurtosis if you need this feature.") - } - - hostPathPath := path.Join(volumeHostPathRootDirectory, namespace, volumeName) - hostPathType := apiv1.HostPathDirectoryOrCreate - persistentVolumeDefinition := apiv1.PersistentVolume{ - TypeMeta: metav1.TypeMeta{ - Kind: "", - APIVersion: "", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: volumeName, - GenerateName: "", - Namespace: "", - SelfLink: "", - UID: "", - ResourceVersion: "", - Generation: 0, - CreationTimestamp: metav1.Time{ - Time: time.Time{}, - }, - DeletionTimestamp: nil, - DeletionGracePeriodSeconds: nil, - Labels: labels, - Annotations: nil, - OwnerReferences: nil, - Finalizers: nil, - ManagedFields: nil, - }, - Spec: apiv1.PersistentVolumeSpec{ - Capacity: apiv1.ResourceList{ - apiv1.ResourceStorage: *resource.NewQuantity(requiredSize, resource.BinarySI), - }, - PersistentVolumeSource: apiv1.PersistentVolumeSource{ - GCEPersistentDisk: nil, - AWSElasticBlockStore: nil, - HostPath: &apiv1.HostPathVolumeSource{ - Path: hostPathPath, - Type: &hostPathType, - }, - Glusterfs: nil, - NFS: nil, - RBD: nil, - ISCSI: nil, - Cinder: nil, - CephFS: nil, - FC: nil, - Flocker: nil, - FlexVolume: nil, - AzureFile: nil, - VsphereVolume: nil, - Quobyte: nil, - AzureDisk: nil, - PhotonPersistentDisk: nil, - PortworxVolume: nil, - ScaleIO: nil, - Local: nil, - StorageOS: nil, - CSI: nil, - }, - AccessModes: []apiv1.PersistentVolumeAccessMode{ - apiv1.ReadWriteOnce, // ReadWriteOncePod would be better, but it's a fairly recent feature - }, - ClaimRef: nil, - PersistentVolumeReclaimPolicy: "", - StorageClassName: volumeStorageClassName, - MountOptions: nil, - VolumeMode: nil, - NodeAffinity: nil, - }, - Status: apiv1.PersistentVolumeStatus{ - Phase: "", - Message: "", - Reason: "", - }, - } - - volume, err := volumesClient.Create(ctx, &persistentVolumeDefinition, globalCreateOptions) - if err != nil { - return nil, stacktrace.Propagate(err, "Failed to create volume '%s'", volumeName) - } - return volume, err -} - -func (manager *KubernetesManager) RemovePersistentVolume( - ctx context.Context, - volumeName string, -) error { - volumesClient := manager.kubernetesClientSet.CoreV1().PersistentVolumes() - if err := volumesClient.Delete(ctx, volumeName, globalDeleteOptions); err != nil { - return stacktrace.Propagate(err, "An error occurred removing the persistent volume '%s'", volumeName) - } - return nil -} - -func (manager *KubernetesManager) GetPersistentVolume( - ctx context.Context, - volumeName string, -) (*apiv1.PersistentVolume, error) { - volumesClient := manager.kubernetesClientSet.CoreV1().PersistentVolumes() - volume, err := volumesClient.Get(ctx, volumeName, globalGetOptions) - if err != nil { - return nil, stacktrace.Propagate(err, "An error occurred getting the persistent volume '%s'", volumeName) - } - return volume, nil -} - func (manager *KubernetesManager) GetPersistentVolumesByLabels(ctx context.Context, persistentVolumeLabels map[string]string) (*apiv1.PersistentVolumeList, error) { 
persistentVolumesClient := manager.kubernetesClientSet.CoreV1().PersistentVolumes() @@ -540,8 +398,8 @@ func (manager *KubernetesManager) CreatePersistentVolumeClaim( }, Claims: nil, }, - VolumeName: volumeClaimName, // volume and their respective claims have the same name right now - StorageClassName: &volumeStorageClassName, + VolumeName: "", // we use dynamic provisioning, so this should happen automatically + StorageClassName: &manager.storageClass, VolumeMode: nil, DataSource: nil, DataSourceRef: nil, @@ -561,13 +419,7 @@ return nil, stacktrace.Propagate(err, "Failed to create volume claim '%s'", volumeClaimName) } - // Wait for the PVC to become bound and return that object (which will have VolumeName filled out) - boundVolumeClaim, err := manager.waitForPersistentVolumeClaimBinding(ctx, namespace, volumeClaim.Name) - if err != nil { - return nil, stacktrace.Propagate(err, "An error occurred waiting for persistent volume claim '%v' get bound in namespace '%v'", - volumeClaim.GetName(), volumeClaim.GetNamespace()) - } - return boundVolumeClaim, err + return volumeClaim, err } func (manager *KubernetesManager) RemovePersistentVolumeClaim( @@ -1625,7 +1477,7 @@ func (manager *KubernetesManager) RunExecCommandWithStreamedOutput( return execOutputChan, finalExecResultChan, nil } -func (manager *KubernetesManager) GetAllEnclaveResourcesByLabels(ctx context.Context, namespace string, labels map[string]string) (*apiv1.PodList, *apiv1.ServiceList, *apiv1.PersistentVolumeList, *rbacv1.ClusterRoleList, *rbacv1.ClusterRoleBindingList, error) { +func (manager *KubernetesManager) GetAllEnclaveResourcesByLabels(ctx context.Context, namespace string, labels map[string]string) (*apiv1.PodList, *apiv1.ServiceList, *rbacv1.ClusterRoleList, *rbacv1.ClusterRoleBindingList, error) { var ( wg = sync.WaitGroup{} @@ -1633,7 +1485,6 @@ func (manager *KubernetesManager) GetAllEnclaveResourcesByLabels(ctx context.Con allCallsDoneChan = make(chan bool) podsList *apiv1.PodList servicesList *apiv1.ServiceList - persistentVolumesList *apiv1.PersistentVolumeList clusterRolesList *rbacv1.ClusterRoleList clusterRoleBindingsList *rbacv1.ClusterRoleBindingList ) @@ -1658,16 +1509,6 @@ func (manager *KubernetesManager) GetAllEnclaveResourcesByLabels(ctx context.Con } }() - wg.Add(1) - go func() { - defer wg.Done() - var err error - persistentVolumesList, err = manager.GetPersistentVolumesByLabels(ctx, labels) - if err != nil { - errChan <- stacktrace.Propagate(err, "Expected to be able to get services with labels '%+v', instead a non-nil error was returned", labels) - } - }() - wg.Add(1) go func() { defer wg.Done() @@ -1698,14 +1539,14 @@ func (manager *KubernetesManager) GetAllEnclaveResourcesByLabels(ctx context.Con break case err, isChanOpen := <-errChan: if isChanOpen { - return nil, nil, nil, nil, nil, stacktrace.NewError("The error chan has been closed; this is a bug in Kurtosis") + return nil, nil, nil, nil, stacktrace.NewError("The error chan has been closed; this is a bug in Kurtosis") } if err != nil { - return nil, nil, nil, nil, nil, stacktrace.Propagate(err, "An error occurred getting pods and services for labels '%+v' in namespace '%s'", labels, namespace) + return nil, nil, nil, nil, stacktrace.Propagate(err, "An error occurred getting pods and services for labels '%+v' in namespace '%s'", labels, namespace) } } - return podsList, servicesList, persistentVolumesList, clusterRolesList, clusterRoleBindingsList, nil + return podsList, servicesList,
clusterRolesList, clusterRoleBindingsList, nil } func (manager *KubernetesManager) GetPodsByLabels(ctx context.Context, namespace string, podLabels map[string]string) (*apiv1.PodList, error) { @@ -1981,51 +1822,6 @@ func transformTypedAnnotationsToStrs(input map[*kubernetes_annotation_key.Kubern } */ -func (manager *KubernetesManager) waitForPersistentVolumeClaimBinding( - ctx context.Context, - namespaceName string, - persistentVolumeClaimName string, -) (*apiv1.PersistentVolumeClaim, error) { - deadline := time.Now().Add(waitForPersistentVolumeBoundTimeout) - time.Sleep(time.Duration(waitForPersistentVolumeBoundInitialDelayMilliSeconds) * time.Millisecond) - var result *apiv1.PersistentVolumeClaim - for time.Now().Before(deadline) { - claim, err := manager.GetPersistentVolumeClaim(ctx, namespaceName, persistentVolumeClaimName) - if err != nil { - return nil, stacktrace.Propagate(err, "An error occurred getting persistent volume claim '%v' in namespace '%v", persistentVolumeClaimName, namespaceName) - } - result = claim - claimStatus := claim.Status - claimPhase := claimStatus.Phase - - switch claimPhase { - //Success phase, the Persistent Volume got bound - case apiv1.ClaimBound: - return result, nil - //Lost the Persistent Volume phase, unrecoverable state - case apiv1.ClaimLost: - return nil, stacktrace.NewError( - "The persistent volume claim '%v' ended up in unrecoverable state '%v'", - claim.GetName(), - claimPhase, - ) - case apiv1.ClaimPending: - // not impl - skipping - } - - time.Sleep(time.Duration(waitForPersistentVolumeBoundRetriesDelayMilliSeconds) * time.Millisecond) - } - - return nil, stacktrace.NewError( - "Persistent volume claim '%v' in namespace '%v' did not become bound despite waiting for %v with %v "+ - "between polls", - persistentVolumeClaimName, - namespaceName, - waitForPersistentVolumeBoundTimeout, - waitForPersistentVolumeBoundRetriesDelayMilliSeconds, - ) -} - func (manager *KubernetesManager) waitForPodAvailability(ctx context.Context, namespaceName string, podName string) error { // Wait for the pod to start running deadline := time.Now().Add(podWaitForAvailabilityTimeout) diff --git a/core/launcher/api_container_launcher/kubernetes_backend_config_supplier.go b/core/launcher/api_container_launcher/kubernetes_backend_config_supplier.go index 3ee3462580..a549ecfe3a 100644 --- a/core/launcher/api_container_launcher/kubernetes_backend_config_supplier.go +++ b/core/launcher/api_container_launcher/kubernetes_backend_config_supplier.go @@ -11,14 +11,15 @@ import ( ) type KubernetesBackendConfigSupplier struct { + storageClass string } -func NewKubernetesKurtosisBackendConfigSupplier() KubernetesBackendConfigSupplier { +func NewKubernetesKurtosisBackendConfigSupplier(storageClass string) KubernetesBackendConfigSupplier { return KubernetesBackendConfigSupplier{ - // More fields here when needed + storageClass: storageClass, } } func (backendConfigSupplier KubernetesBackendConfigSupplier) getKurtosisBackendConfig() (args.KurtosisBackendType, interface{}) { - return args.KurtosisBackendType_Kubernetes, kurtosis_backend_config.KubernetesBackendConfig{} + return args.KurtosisBackendType_Kubernetes, kurtosis_backend_config.KubernetesBackendConfig{StorageClass: backendConfigSupplier.storageClass} } diff --git a/core/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go b/core/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go index 1cf9387dae..97746beda1 100644 --- a/core/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go +++ 
b/core/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go @@ -6,5 +6,5 @@ package kurtosis_backend_config type KubernetesBackendConfig struct { - // More things here as needed + StorageClass string } diff --git a/core/server/api_container/main.go b/core/server/api_container/main.go index 94a7a33169..8f508510bf 100644 --- a/core/server/api_container/main.go +++ b/core/server/api_container/main.go @@ -146,7 +146,7 @@ func runMain() error { } case args.KurtosisBackendType_Kubernetes: // TODO Use this value when we have fields for the API container - _, ok := (clusterConfig).(kurtosis_backend_config.KubernetesBackendConfig) + clusterConfigK8s, ok := (clusterConfig).(kurtosis_backend_config.KubernetesBackendConfig) if !ok { return stacktrace.NewError( "Failed to cast untyped cluster configuration object '%+v' to the appropriate type, even though "+ @@ -155,7 +155,7 @@ func runMain() error { args.KurtosisBackendType_Kubernetes.String(), ) } - kurtosisBackend, err = kubernetes_kurtosis_backend.GetApiContainerBackend(ctx) + kurtosisBackend, err = kubernetes_kurtosis_backend.GetApiContainerBackend(ctx, clusterConfigK8s.StorageClass) if err != nil { return stacktrace.Propagate( err, diff --git a/docs/docs/cli-reference/cluster-get.md b/docs/docs/cli-reference/cluster-get.md index 3345924106..866f514fae 100644 --- a/docs/docs/cli-reference/cluster-get.md +++ b/docs/docs/cli-reference/cluster-get.md @@ -31,4 +31,9 @@ kurtosis-clusters: kubernetes-cluster-name: "NAME-OF-YOUR-CLUSTER" storage-class: "standard" enclave-size-in-megabytes: 10 -``` \ No newline at end of file +``` + +:::tip Storage Class +The Storage Class specified in the configuration above will be used to provision persistent volumes. Make sure you set the right +value if you are using persistent directories. +::: \ No newline at end of file diff --git a/docs/docs/cli-reference/cluster-ls.md b/docs/docs/cli-reference/cluster-ls.md index 8c5cf519ef..1603f93f2f 100644 --- a/docs/docs/cli-reference/cluster-ls.md +++ b/docs/docs/cli-reference/cluster-ls.md @@ -31,4 +31,9 @@ kurtosis-clusters: kubernetes-cluster-name: "NAME-OF-YOUR-CLUSTER" storage-class: "standard" enclave-size-in-megabytes: 10 -``` \ No newline at end of file +``` + +:::tip Storage Class +The Storage Class specified in the configuration above will be used to provision persistent volumes. Make sure you set the right +value if you are using persistent directories. +::: \ No newline at end of file diff --git a/docs/docs/cli-reference/cluster-set.md b/docs/docs/cli-reference/cluster-set.md index 7fc0d94e1f..42c0e22ed1 100644 --- a/docs/docs/cli-reference/cluster-set.md +++ b/docs/docs/cli-reference/cluster-set.md @@ -31,4 +31,9 @@ kurtosis-clusters: kubernetes-cluster-name: "NAME-OF-YOUR-CLUSTER" storage-class: "standard" enclave-size-in-megabytes: 10 -``` \ No newline at end of file +``` + +:::tip Storage Class +The Storage Class specified in the configuration above will be used to provision persistent volumes. Make sure you set the right +value if you are using persistent directories. +::: \ No newline at end of file diff --git a/docs/docs/guides/running-in-k8s.md b/docs/docs/guides/running-in-k8s.md index 8efa2199a6..d9d80ba40c 100644 --- a/docs/docs/guides/running-in-k8s.md +++ b/docs/docs/guides/running-in-k8s.md @@ -41,7 +41,7 @@ III. Add your cluster information to `kurtosis-config.yml` -------------------------------- 1. Open the file located at `"$(kurtosis config path)"`.
This should look like `/Users//Library/Application Support/kurtosis/kurtosis-config.yml` on MacOS. -2. Paste the following contents, changing `NAME-OF-YOUR-CLUSTER` to the cluster you created and save: +2. Paste the following contents, changing `NAME-OF-YOUR-CLUSTER` and `STORAGE-CLASS-TO-USE` to match the cluster you created, and save: ```yaml config-version: 2 should-send-metrics: true @@ -58,10 +58,15 @@ kurtosis-clusters: type: "kubernetes" config: kubernetes-cluster-name: "NAME-OF-YOUR-CLUSTER" - storage-class: "standard" + storage-class: "STORAGE-CLASS-TO-USE" enclave-size-in-megabytes: 10 ``` +:::tip Storage Class +The Storage Class specified in the configuration above will be used to provision persistent volumes. Make sure you set the right +value if you are using persistent directories. +::: + IV. Configure Kurtosis -------------------------------- diff --git a/engine/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go b/engine/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go index 3d2659e439..97746beda1 100644 --- a/engine/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go +++ b/engine/launcher/args/kurtosis_backend_config/kubernetes_backend_config.go @@ -6,4 +6,5 @@ package kurtosis_backend_config type KubernetesBackendConfig struct { + StorageClass string } diff --git a/engine/launcher/engine_server_launcher/kubernetes_backend_config_supplier.go b/engine/launcher/engine_server_launcher/kubernetes_backend_config_supplier.go index 762edb0d73..de746c18fe 100644 --- a/engine/launcher/engine_server_launcher/kubernetes_backend_config_supplier.go +++ b/engine/launcher/engine_server_launcher/kubernetes_backend_config_supplier.go @@ -23,5 +23,5 @@ func NewKubernetesKurtosisBackendConfigSupplier(storageClass string, enclaveSize } func (backendConfigSupplier KubernetesBackendConfigSupplier) getKurtosisBackendConfig() (args.KurtosisBackendType, interface{}) { - return args.KurtosisBackendType_Kubernetes, kurtosis_backend_config.KubernetesBackendConfig{} + return args.KurtosisBackendType_Kubernetes, kurtosis_backend_config.KubernetesBackendConfig{StorageClass: backendConfigSupplier.storageClass} } diff --git a/engine/server/engine/main.go b/engine/server/engine/main.go index e2bdc09dce..4ca8a4348a 100644 --- a/engine/server/engine/main.go +++ b/engine/server/engine/main.go @@ -177,7 +177,7 @@ func runMain() error { logFileManager := log_file_manager.NewLogFileManager(kurtosisBackend, osFs, realTime) logFileManager.StartLogFileManagement(ctx) - enclaveManager, err := getEnclaveManager(kurtosisBackend, serverArgs.KurtosisBackendType, serverArgs.ImageVersionTag, serverArgs.PoolSize, serverArgs.EnclaveEnvVars, logFileManager, serverArgs.MetricsUserID, serverArgs.DidUserAcceptSendingMetrics, serverArgs.IsCI, serverArgs.CloudUserID, serverArgs.CloudInstanceID) + enclaveManager, err := getEnclaveManager(kurtosisBackend, serverArgs.KurtosisBackendType, serverArgs.ImageVersionTag, serverArgs.PoolSize, serverArgs.EnclaveEnvVars, logFileManager, serverArgs.MetricsUserID, serverArgs.DidUserAcceptSendingMetrics, serverArgs.IsCI, serverArgs.CloudUserID, serverArgs.CloudInstanceID, serverArgs.KurtosisLocalBackendConfig) if err != nil { return stacktrace.Propagate(err, "Failed to create an enclave manager for backend type '%v' and config '%+v'", serverArgs.KurtosisBackendType, backendConfig) } @@ -299,13 +299,18 @@ func getEnclaveManager( isCI bool, cloudUserId metrics_client.CloudUserID, cloudInstanceId metrics_client.CloudInstanceID,
kurtosisLocalBackendConfig interface{}, ) (*enclave_manager.EnclaveManager, error) { var apiContainerKurtosisBackendConfigSupplier api_container_launcher.KurtosisBackendConfigSupplier switch kurtosisBackendType { case args.KurtosisBackendType_Docker: apiContainerKurtosisBackendConfigSupplier = api_container_launcher.NewDockerKurtosisBackendConfigSupplier() case args.KurtosisBackendType_Kubernetes: - apiContainerKurtosisBackendConfigSupplier = api_container_launcher.NewKubernetesKurtosisBackendConfigSupplier() + kurtosisLocalBackendConfigKubernetesType, ok := kurtosisLocalBackendConfig.(kurtosis_backend_config.KubernetesBackendConfig) + if !ok { + return nil, stacktrace.NewError("Failed to cast cluster configuration interface to the appropriate type, even though Kurtosis backend type is '%v'", args.KurtosisBackendType_Kubernetes.String()) + } + apiContainerKurtosisBackendConfigSupplier = api_container_launcher.NewKubernetesKurtosisBackendConfigSupplier(kurtosisLocalBackendConfigKubernetesType.StorageClass) default: return nil, stacktrace.NewError("Backend type '%v' was not recognized by engine server.", kurtosisBackendType.String()) } @@ -347,11 +352,11 @@ func getKurtosisBackend(ctx context.Context, kurtosisBackendType args.KurtosisBa "connect to a remote Kurtosis backend") } // Use this with more properties - _, ok := (backendConfig).(kurtosis_backend_config.KubernetesBackendConfig) + clusterConfigK8s, ok := (backendConfig).(kurtosis_backend_config.KubernetesBackendConfig) if !ok { return nil, stacktrace.NewError("Failed to cast cluster configuration interface to the appropriate type, even though Kurtosis backend type is '%v'", args.KurtosisBackendType_Kubernetes.String()) } - kurtosisBackend, err = kubernetes_kurtosis_backend.GetEngineServerBackend(ctx) + kurtosisBackend, err = kubernetes_kurtosis_backend.GetEngineServerBackend(ctx, clusterConfigK8s.StorageClass) if err != nil { return nil, stacktrace.Propagate( err,
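Note: with the PersistentVolume dropped from kubernetesVolumeWithClaim, the pod side of this change becomes clearer: a pod volume references a PersistentVolumeClaim by claim name and never by PV name, which is why renaming the Volume and VolumeMount to use VolumeClaimName is safe. A sketch of the resulting wiring, with illustrative names and mount path (not Kurtosis code):

```go
package example

import apiv1 "k8s.io/api/core/v1"

// podVolumeForClaim shows why only the claim name matters on the pod side:
// the Volume points at the PVC, and Kubernetes resolves the PV behind it.
func podVolumeForClaim(claimName string) (apiv1.Volume, apiv1.VolumeMount) {
	volume := apiv1.Volume{
		Name: claimName,
		VolumeSource: apiv1.VolumeSource{
			PersistentVolumeClaim: &apiv1.PersistentVolumeClaimVolumeSource{
				ClaimName: claimName,
				ReadOnly:  false,
			},
		},
	}
	mount := apiv1.VolumeMount{
		Name:      claimName, // must match the Volume's name
		MountPath: "/data",   // illustrative mount path
	}
	return volume, mount
}
```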
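Note: nothing in this diff validates the configured storage-class value, so a typo in kurtosis-config.yml would only surface when the first persistent directory claim fails to provision; that is what the docs tips added above warn about. A hypothetical preflight check (not part of this PR) could fail fast instead, assuming only standard client-go calls and the project's stacktrace library:

```go
package example

import (
	"context"

	"github.com/kurtosis-tech/stacktrace"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// validateStorageClass confirms that the storage class named in
// kurtosis-config.yml exists before any PersistentVolumeClaim references it.
func validateStorageClass(ctx context.Context, clientSet *kubernetes.Clientset, storageClass string) error {
	if _, err := clientSet.StorageV1().StorageClasses().Get(ctx, storageClass, metav1.GetOptions{}); err != nil {
		return stacktrace.Propagate(err, "Storage class '%s' was not found in the cluster; persistent directories would fail to provision", storageClass)
	}
	return nil
}
```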