Skip to content

Commit

Permalink
fix(upgradelog): adapt to new extravolume mechanics
Browse files Browse the repository at this point in the history
As of logging-operator v4.4.0, the way ExtraVolume is handled is
different. It's impossible to make the fluentd Pod associate with a
pre-created PVC. The PVC is now created along with the fluentd
StatefulSet. The UpgradeLog mechanics adapt to the new upstream behavior
by reconciling the StatefulSet-populated PVC instead of creating a new
one.

An UpgradeLog OwnerReference will be added to the PVC to make it live
"longer" than its original owner/creator, i.e., the Logging object.

Signed-off-by: Zespre Chang <[email protected]>
  • Loading branch information
starbops authored and bk201 committed Sep 9, 2024
1 parent f05f444 commit 449d1ec
Show file tree
Hide file tree
Showing 5 changed files with 225 additions and 41 deletions.
2 changes: 1 addition & 1 deletion pkg/api/upgradelog/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ func prepareLogPackager(upgradeLog *harvesterv1.UpgradeLog, imageVersion, archiv
Name: "log-archive",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: name.SafeConcatName(upgradeLog.Name, util.UpgradeLogArchiveComponent),
ClaimName: ctlupgradelog.GetUpgradeLogPvcName(upgradeLog),
},
},
},
Expand Down
80 changes: 49 additions & 31 deletions pkg/controller/master/upgradelog/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package upgradelog

import (
"fmt"
"strings"

"github.com/cisco-open/operator-tools/pkg/volume"
loggingv1 "github.com/kube-logging/logging-operator/pkg/sdk/logging/api/v1beta1"
Expand All @@ -15,6 +16,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"

harvesterv1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
Expand All @@ -35,35 +37,6 @@ func upgradeLogReference(upgradeLog *harvesterv1.UpgradeLog) metav1.OwnerReferen
}
}

// preparePvc builds the log-archive PersistentVolumeClaim for the given
// UpgradeLog. The claim is placed in the harvester-system namespace, labeled
// as the archive component, and owned by the UpgradeLog so it is garbage
// collected together with it.
func preparePvc(upgradeLog *harvesterv1.UpgradeLog) *corev1.PersistentVolumeClaim {
	filesystemMode := corev1.PersistentVolumeFilesystem

	pvc := &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name.SafeConcatName(upgradeLog.Name, util.UpgradeLogArchiveComponent),
			Namespace: util.HarvesterSystemNamespaceName,
			Labels: map[string]string{
				util.LabelUpgradeLog:          upgradeLog.Name,
				util.LabelUpgradeLogComponent: util.UpgradeLogArchiveComponent,
			},
			OwnerReferences: []metav1.OwnerReference{upgradeLogReference(upgradeLog)},
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.VolumeResourceRequirements{
				Requests: corev1.ResourceList{
					"storage": resource.MustParse(defaultLogArchiveVolumeSize),
				},
			},
			VolumeMode: &filesystemMode,
		},
	}

	return pvc
}

func prepareOperator(upgradeLog *harvesterv1.UpgradeLog) *mgmtv3.ManagedChart {
operatorName := name.SafeConcatName(upgradeLog.Name, util.UpgradeLogOperatorComponent)
return &mgmtv3.ManagedChart{
Expand Down Expand Up @@ -98,6 +71,8 @@ func prepareOperator(upgradeLog *harvesterv1.UpgradeLog) *mgmtv3.ManagedChart {
}

func prepareLogging(upgradeLog *harvesterv1.UpgradeLog, images map[string]Image) *loggingv1.Logging {
volumeMode := corev1.PersistentVolumeFilesystem

return &loggingv1.Logging{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
Expand Down Expand Up @@ -144,9 +119,20 @@ func prepareLogging(upgradeLog *harvesterv1.UpgradeLog, images map[string]Image)
Volume: &volume.KubernetesVolume{
PersistentVolumeClaim: &volume.PersistentVolumeClaim{
PersistentVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: name.SafeConcatName(upgradeLog.Name, util.UpgradeLogArchiveComponent),
ClaimName: util.UpgradeLogArchiveComponent,
ReadOnly: false,
},
PersistentVolumeClaimSpec: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.VolumeResourceRequirements{
Requests: corev1.ResourceList{
"storage": resource.MustParse(defaultLogArchiveVolumeSize),
},
},
VolumeMode: &volumeMode,
},
},
},
},
Expand Down Expand Up @@ -372,7 +358,7 @@ func prepareLogDownloader(upgradeLog *harvesterv1.UpgradeLog, imageVersion strin
Name: "log-archive",
VolumeSource: corev1.VolumeSource{
PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
ClaimName: name.SafeConcatName(upgradeLog.Name, util.UpgradeLogArchiveComponent),
ClaimName: GetUpgradeLogPvcName(upgradeLog),
ReadOnly: true,
},
},
Expand Down Expand Up @@ -416,6 +402,23 @@ func prepareLogDownloaderSvc(upgradeLog *harvesterv1.UpgradeLog) *corev1.Service
}
}

// GetUpgradeLogPvcName returns the name of the log-archive PVC, which is
// created by the fluentd StatefulSet.
//
// The name will look like: <upgradelog>-infra-log-archive-<upgradelog>-infra-fluentd-0
// For instance: hvst-upgrade-bczl4-upgradelog-infra-log-archive-hvst-upgrade-bczl4-upgradelog-infra-fluentd-0.
//
// TODO: As of rancher-logging v4.4.0, we use the PVC created by the fluentd
// StatefulSet, not making it ourselves. After v4.6.0, we need to revisit here
// and perhaps update the implementation because the upstream behavior changes.
func GetUpgradeLogPvcName(upgradeLog *harvesterv1.UpgradeLog) string {
	// The "<upgradelog>-infra" prefix appears twice in the generated name:
	// once from the volume name and once from the StatefulSet pod name.
	infraName := strings.Join([]string{upgradeLog.Name, util.UpgradeLogInfraComponent}, "-")

	return strings.Join([]string{
		infraName,
		util.UpgradeLogArchiveComponent,
		infraName,
		"fluentd-0",
	}, "-")
}

func setOperatorDeployedCondition(upgradeLog *harvesterv1.UpgradeLog, status corev1.ConditionStatus, reason, message string) {
harvesterv1.LoggingOperatorDeployed.SetStatus(upgradeLog, string(status))
harvesterv1.LoggingOperatorDeployed.Reason(upgradeLog, reason)
Expand Down Expand Up @@ -836,6 +839,21 @@ func (p *pvcBuilder) WithLabel(key, value string) *pvcBuilder {
return p
}

// OwnerReference appends an owner reference with the given name and uid to
// the PVC under construction and returns the builder for chaining.
func (p *pvcBuilder) OwnerReference(name, uid string) *pvcBuilder {
	// append handles a nil/empty OwnerReferences slice natively, so no
	// explicit emptiness check is needed.
	p.pvc.OwnerReferences = append(p.pvc.OwnerReferences, metav1.OwnerReference{
		Name: name,
		UID:  types.UID(uid),
	})

	return p
}

// Build returns the PersistentVolumeClaim assembled by the builder.
func (p *pvcBuilder) Build() *corev1.PersistentVolumeClaim {
	return p.pvc
}
Expand Down
75 changes: 71 additions & 4 deletions pkg/controller/master/upgradelog/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,10 @@ import (
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"

harvesterv1 "github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
ctlharvesterv1 "github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io/v1beta1"
Expand All @@ -43,6 +45,15 @@ const (
upgradeLogStateAnnotation = "harvesterhci.io/upgradeLogState"
upgradeLogStateCollecting = "Collecting"
upgradeLogStateStopped = "Stopped"

appLabelName = "app.kubernetes.io/name"
)

var (
logArchiveMatchingLabels = labels.Set{
appLabelName: "fluentd",
util.LabelUpgradeLogComponent: util.UpgradeLogAggregatorComponent,
}
)

type handler struct {
Expand Down Expand Up @@ -146,10 +157,6 @@ func (h *handler) OnUpgradeLogChange(_ string, upgradeLog *harvesterv1.UpgradeLo

toUpdate := upgradeLog.DeepCopy()

// The volume acts as a central log storage for fluentd
if _, err := h.pvcClient.Create(preparePvc(upgradeLog)); err != nil && !apierrors.IsAlreadyExists(err) {
return nil, err
}
// The creation of the Logging resource will indirectly bring up fluent-bit DaemonSet and fluentd StatefulSet
candidateImages, err := h.getConsolidatedLoggingImageList(name.SafeConcatName(upgradeLog.Name, util.UpgradeLogOperatorComponent))
if err != nil {
Expand Down Expand Up @@ -581,6 +588,66 @@ func (h *handler) OnStatefulSetChange(_ string, statefulSet *appsv1.StatefulSet)
return statefulSet, err
}

// OnPvcChange watches PVCs in the harvester-system namespace and attaches an
// UpgradeLog OwnerReference to the fluentd log-archive PVC, so the volume
// outlives its original owner (the Logging object) and remains accessible to
// the log downloader and packager pods.
func (h *handler) OnPvcChange(_ string, pvc *corev1.PersistentVolumeClaim) (*corev1.PersistentVolumeClaim, error) {
	if pvc == nil || pvc.DeletionTimestamp != nil || pvc.Namespace != util.HarvesterSystemNamespaceName {
		return pvc, nil
	}

	// We only care about the log-archive PVC created by the fluentd StatefulSet
	pvcLabels := labels.Set(pvc.Labels)
	if !logArchiveMatchingLabels.AsSelector().Matches(pvcLabels) {
		return pvc, nil
	}
	upgradeLogName, ok := pvc.Labels[util.LabelUpgradeLog]
	if !ok {
		return pvc, nil
	}

	// NOTE(review): the file imports the same apimachinery errors package both
	// plain and as apierrors; consider consolidating on apierrors.
	upgradeLog, err := h.upgradeLogCache.Get(util.HarvesterSystemNamespaceName, upgradeLogName)
	if err != nil {
		if !errors.IsNotFound(err) {
			return pvc, err
		}
		logrus.WithFields(logrus.Fields{
			"namespace": pvc.Namespace,
			"name":      pvc.Name,
			"kind":      pvc.Kind,
		}).Warn("upgradelog not found, skip it")
		return pvc, nil
	}

	newOwnerRef := metav1.OwnerReference{
		Name:       upgradeLog.Name,
		APIVersion: upgradeLog.APIVersion,
		UID:        upgradeLog.UID,
		Kind:       upgradeLog.Kind,
	}

	// Nothing to do if the UpgradeLog is already an owner of the PVC
	for _, ownerRef := range pvc.OwnerReferences {
		if ownerRef.UID == newOwnerRef.UID {
			return pvc, nil
		}
	}

	// Add UpgradeLog as an owner of the log-archive PVC because we want it to
	// live longer than its original owner, i.e., Logging, so pods like the
	// downloader and packager can still access the log-archive volume.
	// The early return above guarantees the copy always differs from the
	// original here, so no extra equality check is needed before updating.
	toUpdate := pvc.DeepCopy()
	toUpdate.OwnerReferences = append(toUpdate.OwnerReferences, newOwnerRef)

	logrus.WithFields(logrus.Fields{
		"namespace": pvc.Namespace,
		"name":      pvc.Name,
		"kind":      pvc.Kind,
	}).Info("updating ownerReference")
	return h.pvcClient.Update(toUpdate)
}

func (h *handler) OnUpgradeChange(_ string, upgrade *harvesterv1.Upgrade) (*harvesterv1.Upgrade, error) {
if upgrade == nil || upgrade.DeletionTimestamp != nil || upgrade.Labels == nil || upgrade.Namespace != util.HarvesterSystemNamespaceName {
return upgrade, nil
Expand Down
Loading

0 comments on commit 449d1ec

Please sign in to comment.