diff --git a/pkg/controller/main-controller.go b/pkg/controller/main-controller.go
index 68cdf988392..431e5d0c842 100644
--- a/pkg/controller/main-controller.go
+++ b/pkg/controller/main-controller.go
@@ -19,6 +19,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"net/http"
 	"os"
 	"os/signal"
@@ -1316,6 +1317,29 @@ func (c *Controller) syncHandler(key string) (Result, error) {
 			// return nil so we don't re-queue this work item, this error won't get fixed by reprocessing
 			return WrapResult(Result{}, nil)
 		}
+
+		// Check whether the MinIO system configuration (the MINIO_* env) changed;
+		// if it did, all pods must be restarted together with the same config.
+		expectedSystemCfg, err := c.getSystemCfgFromStatefulSet(ctx, expectedStatefulSet)
+		if err != nil {
+			return WrapResult(Result{}, err)
+		}
+		existingSystemCfg, err := c.getSystemCfgFromStatefulSet(ctx, existingStatefulSet)
+		if err != nil {
+			return WrapResult(Result{}, err)
+		}
+		if !maps.Equal(expectedSystemCfg, existingSystemCfg) {
+			// delete pdb to let all the statefulSet pods get deleted
+			err = c.DeletePDB(ctx, tenant)
+			if err != nil {
+				return WrapResult(Result{}, err)
+			}
+			// find all existing statefulSet pods and delete them
+			err = c.DeletePodsByStatefulSet(ctx, existingStatefulSet)
+			if err != nil {
+				return WrapResult(Result{}, err)
+			}
+		}
 	}
 
 	// Handle PVC expansion
diff --git a/pkg/controller/pods.go b/pkg/controller/pods.go
index 9eeeef4b578..32089be3312 100644
--- a/pkg/controller/pods.go
+++ b/pkg/controller/pods.go
@@ -17,12 +17,17 @@ package controller
 
 import (
+	"context"
 	"fmt"
 	"time"
 
 	miniov2 "github.com/minio/operator/pkg/apis/minio.min.io/v2"
 	"github.com/minio/operator/pkg/utils"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // handlePodChange will handle changes in pods and queue it for processing, pods are already filtered by PodInformer
 func (c *Controller) handlePodChange(obj
interface{}) {
 	key := fmt.Sprintf("%s/%s", object.GetNamespace(), instanceName)
 	c.healthCheckQueue.AddAfter(key, 1*time.Second)
 }
+
+// DeletePodsByStatefulSet deletes (best effort) all running pods that belong to
+// the given statefulset, forcing them to be recreated with the new configuration.
+// Pods already terminating are skipped; NotFound errors are ignored and any other
+// delete error is only logged so the remaining pods are still processed. Only a
+// failure to list the pods is returned to the caller.
+func (c *Controller) DeletePodsByStatefulSet(ctx context.Context, sts *appsv1.StatefulSet) error {
+	// the statefulset's pods are matched via its pod-template labels
+	listOpt := &client.ListOptions{
+		Namespace: sts.Namespace,
+	}
+	client.MatchingLabels(sts.Spec.Template.Labels).ApplyToList(listOpt)
+	podList := &corev1.PodList{}
+	if err := c.k8sClient.List(ctx, podList, listOpt); err != nil {
+		return err
+	}
+	for i := range podList.Items {
+		pod := &podList.Items[i]
+		// skip pods that are already being deleted
+		if pod.DeletionTimestamp != nil {
+			continue
+		}
+		// best effort: ignore NotFound and only log other errors, so one failing
+		// pod does not leave the remaining pods running the old configuration
+		if err := c.k8sClient.Delete(ctx, pod); client.IgnoreNotFound(err) != nil {
+			klog.Infof("unable to restart %s/%s (ignored): %s", pod.Namespace, pod.Name, err)
+		}
+	}
+	return nil
+}
diff --git a/pkg/controller/pools.go b/pkg/controller/pools.go
index 1b366f07dc0..460ecb06b1b 100644
--- a/pkg/controller/pools.go
+++ b/pkg/controller/pools.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"strings"
 
 	corev1 "k8s.io/api/core/v1"
 
@@ -92,6 +93,46 @@ func poolSSMatchesSpec(expectedStatefulSet, existingStatefulSet *appsv1.Stateful
 	return true, nil
 }
 
+// getSystemCfgFromStatefulSet collects the MinIO system configuration — every
+// MINIO_* environment variable of the minio server container — from the given
+// statefulset, resolving secret and configmap key references to their values.
+// It returns an error when a referenced secret/configmap (or key) is missing,
+// or when a MINIO_* variable uses a source that cannot be resolved here
+// (e.g. fieldRef / resourceFieldRef).
+func (c *Controller) getSystemCfgFromStatefulSet(ctx context.Context, sts *appsv1.StatefulSet) (map[string]string, error) {
+	systemCfg := make(map[string]string)
+	for _, container := range sts.Spec.Template.Spec.Containers {
+		if container.Name != miniov2.MinIOServerName {
+			continue
+		}
+		for _, e := range container.Env {
+			if !strings.HasPrefix(e.Name, "MINIO_") {
+				continue
+			}
+			switch {
+			case e.ValueFrom == nil:
+				// plain value; an explicitly empty value is still a valid setting
+				systemCfg[e.Name] = e.Value
+			case e.ValueFrom.SecretKeyRef != nil:
+				secret, err := c.kubeClientSet.CoreV1().Secrets(sts.Namespace).Get(ctx, e.ValueFrom.SecretKeyRef.Name, metav1.GetOptions{})
+				if err != nil {
+					return nil, err
+				}
+				value, ok := secret.Data[e.ValueFrom.SecretKeyRef.Key]
+				if !ok {
+					return nil, fmt.Errorf("secret %s does not have key %s", e.ValueFrom.SecretKeyRef.Name, e.ValueFrom.SecretKeyRef.Key)
+				}
+				systemCfg[e.Name] = string(value)
+			case e.ValueFrom.ConfigMapKeyRef != nil:
+				configMap, err := c.kubeClientSet.CoreV1().ConfigMaps(sts.Namespace).Get(ctx, e.ValueFrom.ConfigMapKeyRef.Name, metav1.GetOptions{})
+				if err != nil {
+					return nil, err
+				}
+				value, ok := configMap.Data[e.ValueFrom.ConfigMapKeyRef.Key]
+				if !ok {
+					return nil, fmt.Errorf("configmap %s does not have key %s", e.ValueFrom.ConfigMapKeyRef.Name, e.ValueFrom.ConfigMapKeyRef.Key)
+				}
+				systemCfg[e.Name] = value
+			default:
+				// fieldRef, resourceFieldRef, etc. cannot be resolved statically
+				return nil, fmt.Errorf("unsupported env var %s", e.Name)
+			}
+		}
+	}
+	return systemCfg, nil
+}
+
 // restartInitializedPool restarts a pool that is assumed to have been initialized
 func (c *Controller) restartInitializedPool(ctx context.Context, tenant *miniov2.Tenant, pool miniov2.Pool, tenantConfiguration map[string][]byte) error {
 	// get a new admin client that points to a pod of an already initialized pool (ie: pool-0)