diff --git a/PROJECT b/PROJECT
index b7db8c4d3..21e00361d 100644
--- a/PROJECT
+++ b/PROJECT
@@ -126,4 +126,17 @@ resources:
     defaulting: true
     validation: true
     webhookVersion: v1
+- api:
+    crdVersion: v1
+    namespaced: true
+  controller: true
+  domain: oceanbase.com
+  group: oceanbase
+  kind: OBClusterOperation
+  path: github.com/oceanbase/ob-operator/api/v1alpha1
+  version: v1alpha1
+  webhooks:
+    defaulting: true
+    validation: true
+    webhookVersion: v1
 version: "3"
diff --git a/api/constants/cluster.go b/api/constants/cluster.go
new file mode 100644
index 000000000..eb3481b4b
--- /dev/null
+++ b/api/constants/cluster.go
@@ -0,0 +1,32 @@
+/*
+Copyright (c) 2024 OceanBase
+ob-operator is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+         http://license.coscl.org.cn/MulanPSL2
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+*/
+
+package constants
+
+import "github.com/oceanbase/ob-operator/api/types"
+
+const (
+	ClusterOpTypeAddZones         types.ClusterOperationType = "AddZones"
+	ClusterOpTypeDeleteZones      types.ClusterOperationType = "DeleteZones"
+	ClusterOpTypeAdjustReplicas   types.ClusterOperationType = "AdjustReplicas"
+	ClusterOpTypeUpgrade          types.ClusterOperationType = "Upgrade"
+	ClusterOpTypeRestartOBServers types.ClusterOperationType = "RestartOBServers"
+	ClusterOpTypeModifyOBServers  types.ClusterOperationType = "ModifyOBServers"
+	ClusterOpTypeSetParameters    types.ClusterOperationType = "SetParameters"
+)
+
+const (
+	ClusterOpStatusPending   types.ClusterOperationStatus = "Pending"
+	ClusterOpStatusRunning   types.ClusterOperationStatus = "Running"
+	ClusterOpStatusSucceeded types.ClusterOperationStatus = "Succeeded"
+	ClusterOpStatusFailed    types.ClusterOperationStatus = "Failed"
+)
diff --git a/api/types/types.go b/api/types/types.go
index 5436e9628..e611a7384 100644
--- a/api/types/types.go
+++ b/api/types/types.go
@@ -30,3 +30,6 @@ type RestoreJobStatus string
 type TenantRole string
 type TenantOperationStatus string
 type TenantOperationType string
+
+type ClusterOperationType string
+type ClusterOperationStatus string
diff --git a/api/v1alpha1/obcluster_types.go b/api/v1alpha1/obcluster_types.go
index 98edcd3ba..df8ea531a 100644
--- a/api/v1alpha1/obcluster_types.go
+++ b/api/v1alpha1/obcluster_types.go
@@ -61,7 +61,6 @@ type OBClusterStatus struct {
 //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 //+kubebuilder:printcolumn:name="Tasks",type="string",JSONPath=".status.operationContext.tasks",priority=1
 //+kubebuilder:printcolumn:name="Task",type="string",JSONPath=".status.operationContext.task",priority=1
-//+kubebuilder:printcolumn:name="TaskIdx",type="string",JSONPath=".status.operationContext.idx",priority=1
 
 // OBCluster is the Schema for the obclusters API
 type OBCluster struct {
diff --git a/api/v1alpha1/obcluster_webhook.go b/api/v1alpha1/obcluster_webhook.go
index 0169016dc..fb3ccd65a 100644
--- a/api/v1alpha1/obcluster_webhook.go
+++ b/api/v1alpha1/obcluster_webhook.go
@@ -234,13 +234,13 @@ func (r *OBCluster) ValidateUpdate(old runtime.Object) (admission.Warnings, erro
 	newStorage := r.Spec.OBServerTemplate.Storage
 	oldStorage := oldCluster.Spec.OBServerTemplate.Storage
 	if newStorage.DataStorage.Size.Cmp(oldStorage.DataStorage.Size) > 0 {
-		err = errors.Join(err, r.validateStorageClassAllowExpansion(newStorage.DataStorage.StorageClass))
+		err = errors.Join(err, validateStorageClassAllowExpansion(newStorage.DataStorage.StorageClass))
 	}
 	if newStorage.LogStorage.Size.Cmp(oldStorage.LogStorage.Size) > 0 {
-		err = errors.Join(err, r.validateStorageClassAllowExpansion(newStorage.LogStorage.StorageClass))
+		err = errors.Join(err, validateStorageClassAllowExpansion(newStorage.LogStorage.StorageClass))
 	}
 	if newStorage.RedoLogStorage.Size.Cmp(oldStorage.RedoLogStorage.Size) > 0 {
-		err = errors.Join(err, r.validateStorageClassAllowExpansion(newStorage.RedoLogStorage.StorageClass))
+		err = errors.Join(err, validateStorageClassAllowExpansion(newStorage.RedoLogStorage.StorageClass))
 	}
 	if err != nil {
 		return nil, err
@@ -484,7 +484,7 @@ func (r *OBCluster) createDefaultUserSecret(secretName string) error {
 	})
 }
 
-func (r *OBCluster) validateStorageClassAllowExpansion(storageClassName string) error {
+func validateStorageClassAllowExpansion(storageClassName string) error {
 	sc := storagev1.StorageClass{}
 	err := clt.Get(context.Background(), types.NamespacedName{
 		Name: storageClassName,
diff --git a/api/v1alpha1/obclusteroperation_types.go b/api/v1alpha1/obclusteroperation_types.go
new file mode 100644
index 000000000..53517cd50
--- /dev/null
+++ b/api/v1alpha1/obclusteroperation_types.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	apitypes "github.com/oceanbase/ob-operator/api/types"
+	tasktypes "github.com/oceanbase/ob-operator/pkg/task/types"
+)
+
+// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
+
+// OBClusterOperationSpec defines the desired state of OBClusterOperation
+type OBClusterOperationSpec struct {
+	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+
+	OBCluster string `json:"obcluster"`
+	Type apitypes.ClusterOperationType `json:"type"`
+	Force bool `json:"force,omitempty"`
+	//+kubebuilder:default=7
+	TTLDays int `json:"ttlDays,omitempty"`
+	AddZones []apitypes.OBZoneTopology `json:"addZones,omitempty"`
+	DeleteZones []string `json:"deleteZones,omitempty"`
+	AdjustReplicas []AlterZoneReplicas `json:"adjustReplicas,omitempty"`
+	RestartOBServers *RestartOBServersConfig `json:"restartOBServers,omitempty"`
+	Upgrade *UpgradeConfig `json:"upgrade,omitempty"`
+	ModifyOBServers *ModifyOBServersConfig `json:"modifyOBServers,omitempty"`
+	SetParameters []apitypes.Parameter `json:"setParameters,omitempty"`
+}
+
+type ModifyOBServersConfig struct {
+	Resource *apitypes.ResourceSpec `json:"resource,omitempty"`
+	ExpandStorageSize *ExpandStorageSizeConfig `json:"expandStorageSize,omitempty"`
+	ModifyStorageClass *ModifyStorageClassConfig `json:"modifyStorageClass,omitempty"`
+	AddingMonitor *apitypes.MonitorTemplate `json:"addingMonitor,omitempty"`
+	RemoveMonitor bool `json:"removeMonitor,omitempty"`
+	AddingBackupVolume *apitypes.BackupVolumeSpec `json:"addingBackupVolume,omitempty"`
+	RemoveBackupVolume bool `json:"removeBackupVolume,omitempty"`
+}
+
+type RestartOBServersConfig struct {
+	OBServers []string `json:"observers,omitempty"`
+	OBZones []string `json:"obzones,omitempty"`
+	All bool `json:"all,omitempty"`
+}
+
+type ExpandStorageSizeConfig struct {
+	DataStorage *resource.Quantity `json:"dataStorage,omitempty"`
+	LogStorage *resource.Quantity `json:"logStorage,omitempty"`
+	RedoLogStorage *resource.Quantity `json:"redoLogStorage,omitempty"`
+}
+
+type ModifyStorageClassConfig struct {
+	DataStorage string `json:"dataStorage,omitempty"`
+	LogStorage string `json:"logStorage,omitempty"`
+	RedoLogStorage string `json:"redoLogStorage,omitempty"`
+}
+
+type UpgradeConfig struct {
+	Image string `json:"image"`
+}
+
+type AlterZoneReplicas struct {
+	Zones []string `json:"zones"`
+	To int `json:"to,omitempty"`
+}
+
+// OBClusterOperationStatus defines the observed state of OBClusterOperation
+type OBClusterOperationStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make" to regenerate code after modifying this file
+	Status apitypes.ClusterOperationStatus `json:"status"`
+	OperationContext *tasktypes.OperationContext `json:"operationContext,omitempty"`
+	ClusterSnapshot *OBClusterSnapshot `json:"clusterSnapshot,omitempty"`
+}
+
+type OBClusterSnapshot struct {
+	Spec *OBClusterSpec `json:"spec,omitempty"`
+	Status *OBClusterStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:printcolumn:name="Type",type=string,JSONPath=`.spec.type`
+//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status"
+//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+//+kubebuilder:printcolumn:name="Tasks",type="string",JSONPath=".status.operationContext.tasks",priority=1
+//+kubebuilder:printcolumn:name="Task",type="string",JSONPath=".status.operationContext.task",priority=1
+
+// OBClusterOperation is the Schema for the obclusteroperations API
+type OBClusterOperation struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec OBClusterOperationSpec `json:"spec,omitempty"`
+	Status OBClusterOperationStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// OBClusterOperationList contains a list of OBClusterOperation
+type OBClusterOperationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []OBClusterOperation `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&OBClusterOperation{}, &OBClusterOperationList{})
+}
+
+func (o *OBClusterOperation) ShouldBeCleaned() bool {
+	return o.CreationTimestamp.AddDate(0, 0, o.Spec.TTLDays).Before(metav1.Now().Time)
+}
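For orientation, a minimal manifest driving the API above might look like the following sketch (illustrative only, not part of this patch; the cluster name "test" and zone name "zone2" are hypothetical):

apiVersion: oceanbase.oceanbase.com/v1alpha1
kind: OBClusterOperation
metadata:
  generateName: add-zones-op-
  namespace: default
spec:
  obcluster: test     # an existing OBCluster in the same namespace
  type: AddZones      # one of the ClusterOpType* constants from api/constants/cluster.go
  ttlDays: 7          # the resource becomes eligible for cleanup this many days after creation (ShouldBeCleaned above)
  addZones:
    - zone: zone2
      replica: 1

The validating webhook below rejects a manifest whose type-specific section (addZones here) is missing, so each operation type carries exactly one matching configuration block.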
diff --git a/api/v1alpha1/obclusteroperation_webhook.go b/api/v1alpha1/obclusteroperation_webhook.go
new file mode 100644
index 000000000..02b9c2a08
--- /dev/null
+++ b/api/v1alpha1/obclusteroperation_webhook.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"context"
+
+	kubeerrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/oceanbase/ob-operator/api/constants"
+	oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase"
+)
+
+// log is for logging in this package.
+var obclusteroperationlog = logf.Log.WithName("obclusteroperation-resource")
+
+// SetupWebhookWithManager will setup the manager to manage the webhooks
+func (r *OBClusterOperation) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(r).
+		Complete()
+}
+
+//+kubebuilder:webhook:path=/mutate-oceanbase-oceanbase-com-v1alpha1-obclusteroperation,mutating=true,failurePolicy=fail,sideEffects=None,groups=oceanbase.oceanbase.com,resources=obclusteroperations,verbs=create;update,versions=v1alpha1,name=mobclusteroperation.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Defaulter = &OBClusterOperation{}
+
+// Default implements webhook.Defaulter so a webhook will be registered for the type
+func (r *OBClusterOperation) Default() {
+	ctx := context.Background()
+	obcluster := OBCluster{}
+	err := clt.Get(ctx, types.NamespacedName{
+		Namespace: r.Namespace,
+		Name: r.Spec.OBCluster,
+	}, &obcluster)
+	if err != nil {
+		obclusteroperationlog.Info("obcluster not found", "name", r.Spec.OBCluster)
+		return
+	}
+	if r.Labels == nil {
+		r.Labels = make(map[string]string)
+	}
+	r.Labels[oceanbaseconst.LabelRefOBCluster] = obcluster.Name
+}
+
+// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
+//+kubebuilder:webhook:path=/validate-oceanbase-oceanbase-com-v1alpha1-obclusteroperation,mutating=false,failurePolicy=fail,sideEffects=None,groups=oceanbase.oceanbase.com,resources=obclusteroperations,verbs=create;update,versions=v1alpha1,name=vobclusteroperation.kb.io,admissionReviewVersions=v1
+
+var _ webhook.Validator = &OBClusterOperation{}
+
+// ValidateCreate implements webhook.Validator so a webhook will be registered for the type
+func (r *OBClusterOperation) ValidateCreate() (admission.Warnings, error) {
+	switch r.Spec.Type {
+	case constants.ClusterOpTypeAddZones,
+		constants.ClusterOpTypeDeleteZones,
+		constants.ClusterOpTypeAdjustReplicas,
+		constants.ClusterOpTypeUpgrade,
+		constants.ClusterOpTypeRestartOBServers,
+		constants.ClusterOpTypeModifyOBServers,
+		constants.ClusterOpTypeSetParameters:
+	default:
+		return nil, field.Invalid(field.NewPath("spec").Child("type"), r.Spec.Type, "type must be one of AddZones, DeleteZones, AdjustReplicas, Upgrade, RestartOBServers, ModifyOBServers, SetParameters")
+	}
+
+	if r.Spec.Type == constants.ClusterOpTypeAddZones && r.Spec.AddZones == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("addZones"), r.Spec.AddZones, "addZones must be set for cluster operation of type addZones")
+	} else if r.Spec.Type == constants.ClusterOpTypeDeleteZones && r.Spec.DeleteZones == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("deleteZones"), r.Spec.DeleteZones, "deleteZones must be set for cluster operation of type deleteZones")
+	} else if r.Spec.Type == constants.ClusterOpTypeAdjustReplicas && r.Spec.AdjustReplicas == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("adjustReplicas"), r.Spec.AdjustReplicas, "adjustReplicas must be set for cluster operation of type adjustReplicas")
+	} else if r.Spec.Type == constants.ClusterOpTypeUpgrade && r.Spec.Upgrade == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("upgrade"), r.Spec.Upgrade, "upgrade must be set for cluster operation of type upgrade")
+	} else if r.Spec.Type == constants.ClusterOpTypeRestartOBServers && r.Spec.RestartOBServers == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("restartOBServers"), r.Spec.RestartOBServers, "restartOBServers must be set for cluster operation of type restartOBServers")
+	} else if r.Spec.Type == constants.ClusterOpTypeModifyOBServers && r.Spec.ModifyOBServers == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers"), r.Spec.ModifyOBServers, "modifyOBServers must be set for cluster operation of type modifyOBServers")
+	} else if r.Spec.Type == constants.ClusterOpTypeSetParameters && r.Spec.SetParameters == nil {
+		return nil, field.Invalid(field.NewPath("spec").Child("setParameters"), r.Spec.SetParameters, "setParameters must be set for cluster operation of type setParameters")
+	}
+
+	ctx := context.Background()
+	obcluster := OBCluster{}
+	err := clt.Get(ctx, types.NamespacedName{
+		Namespace: r.Namespace,
+		Name: r.Spec.OBCluster,
+	}, &obcluster)
+	if err != nil {
+		if kubeerrors.IsNotFound(err) {
+			return nil, field.Invalid(field.NewPath("spec").Child("obcluster"), r.Spec.OBCluster, "obcluster not found")
+		}
+		return nil, kubeerrors.NewInternalError(err)
+	}
+	if !r.Spec.Force && obcluster.Status.Status != "running" {
+		return nil, field.Invalid(field.NewPath("spec").Child("obcluster"), r.Spec.OBCluster, "obcluster is currently operating, please use force to override")
+	}
+
+	zoneReplicaMap := make(map[string]int)
+	for _, z := range obcluster.Spec.Topology {
+		zoneReplicaMap[z.Zone] = z.Replica
+	}
+
+	if r.Spec.AddZones != nil {
+		for _, zone := range r.Spec.AddZones {
+			if zone.Replica <= 0 {
+				return nil, field.Invalid(field.NewPath("spec").Child("addZones").Child("replica"), zone.Replica, "replica must be greater than 0")
+			}
+			if _, ok := zoneReplicaMap[zone.Zone]; ok {
+				return nil, field.Invalid(field.NewPath("spec").Child("addZones").Child("zone"), zone.Zone, "zone already exists")
+			}
+		}
+	}
+
+	if r.Spec.DeleteZones != nil {
+		for _, zone := range r.Spec.DeleteZones {
+			if _, ok := zoneReplicaMap[zone]; !ok {
+				return nil, field.Invalid(field.NewPath("spec").Child("deleteZones").Child("zone"), zone, "zone does not exist")
+			}
+		}
+	}
+
+	if r.Spec.AdjustReplicas != nil {
+		for _, alter := range r.Spec.AdjustReplicas {
+			if alter.To <= 0 {
+				return nil, field.Invalid(field.NewPath("spec").Child("adjustReplicas").Child("to"), alter.To, "replica must be greater than 0")
+			}
+		}
+	}
+
+	if r.Spec.Type == constants.ClusterOpTypeModifyOBServers && r.Spec.ModifyOBServers != nil {
+		modifySpec := r.Spec.ModifyOBServers
+		if modifySpec.ExpandStorageSize != nil {
+			if modifySpec.ExpandStorageSize.DataStorage.Cmp(obcluster.Spec.OBServerTemplate.Storage.DataStorage.Size) < 0 {
+				return nil, field.Invalid(field.NewPath("spec").Child("expandStorageSize").Child("dataStorage"), modifySpec.ExpandStorageSize, "storage size can not be less than current size")
+			}
+			if modifySpec.ExpandStorageSize.LogStorage.Cmp(obcluster.Spec.OBServerTemplate.Storage.LogStorage.Size) < 0 {
+				return nil, field.Invalid(field.NewPath("spec").Child("expandStorageSize").Child("logStorage"), modifySpec.ExpandStorageSize, "storage size can not be less than current size")
+			}
+			if modifySpec.ExpandStorageSize.RedoLogStorage.Cmp(obcluster.Spec.OBServerTemplate.Storage.RedoLogStorage.Size) < 0 {
+				return nil, field.Invalid(field.NewPath("spec").Child("expandStorageSize").Child("redoLogStorage"), modifySpec.ExpandStorageSize, "storage size can not be less than current size")
+			}
+		} else if modifySpec.ModifyStorageClass != nil {
+			if modifySpec.ModifyStorageClass.DataStorage != "" &&
+				modifySpec.ModifyStorageClass.DataStorage != obcluster.Spec.OBServerTemplate.Storage.DataStorage.StorageClass &&
+				validateStorageClassAllowExpansion(modifySpec.ModifyStorageClass.DataStorage) != nil {
+				return nil, field.Invalid(field.NewPath("spec").Child("modifyStorageClass").Child("dataStorage"), modifySpec.ModifyStorageClass, "storage class does not support expansion")
+			}
+			if modifySpec.ModifyStorageClass.LogStorage != "" &&
+				modifySpec.ModifyStorageClass.LogStorage != obcluster.Spec.OBServerTemplate.Storage.LogStorage.StorageClass &&
+				validateStorageClassAllowExpansion(modifySpec.ModifyStorageClass.LogStorage) != nil {
+				return nil, field.Invalid(field.NewPath("spec").Child("modifyStorageClass").Child("logStorage"), modifySpec.ModifyStorageClass, "storage class does not support expansion")
+			}
+			if modifySpec.ModifyStorageClass.RedoLogStorage != "" &&
+				modifySpec.ModifyStorageClass.RedoLogStorage != obcluster.Spec.OBServerTemplate.Storage.RedoLogStorage.StorageClass &&
+				validateStorageClassAllowExpansion(modifySpec.ModifyStorageClass.RedoLogStorage) != nil {
+				return nil, field.Invalid(field.NewPath("spec").Child("modifyStorageClass").Child("redoLogStorage"), modifySpec.ModifyStorageClass, "storage class does not support expansion")
+			}
+		}
+		if modifySpec.AddingMonitor != nil && modifySpec.RemoveMonitor {
+			return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("addingMonitor"), r.Spec.ModifyOBServers, "can not add and remove monitor at the same time")
+		}
+		if modifySpec.AddingMonitor != nil && obcluster.Spec.MonitorTemplate != nil {
+			return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("addingMonitor"), r.Spec.ModifyOBServers, "monitor container already exists")
+		}
+		if modifySpec.RemoveMonitor && obcluster.Spec.MonitorTemplate == nil {
+			return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("removeMonitor"), r.Spec.ModifyOBServers, "monitor container does not exist")
+		}
+		if modifySpec.AddingBackupVolume != nil && obcluster.Spec.BackupVolume != nil {
+			return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("addingBackupVolume"), r.Spec.ModifyOBServers, "backup volume already exists")
+		}
+		if modifySpec.RemoveBackupVolume {
+			if modifySpec.AddingBackupVolume != nil {
+				return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("addingBackupVolume"), r.Spec.ModifyOBServers, "can not add and remove backup volume at the same time")
+			}
+			if obcluster.Spec.BackupVolume == nil {
+				return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("removeBackupVolume"), r.Spec.ModifyOBServers, "backup volume does not exist")
+			}
+			policyList := OBTenantBackupPolicyList{}
+			err := clt.List(ctx, &policyList, &client.ListOptions{
+				Namespace: obcluster.Namespace,
+				LabelSelector: labels.SelectorFromSet(map[string]string{
+					oceanbaseconst.LabelRefOBCluster: obcluster.Name,
+				}),
+			})
+			if err != nil {
+				return nil, kubeerrors.NewInternalError(err)
+			}
+			for _, policy := range policyList.Items {
+				if policy.Spec.DataBackup.Destination.Type == constants.BackupDestTypeNFS ||
+					policy.Spec.LogArchive.Destination.Type == constants.BackupDestTypeNFS {
+					return nil, field.Invalid(field.NewPath("spec").Child("modifyOBServers").Child("removeBackupVolume"), r.Spec.ModifyOBServers, "backup volume is in use, can not be removed")
+				}
+			}
+		}
+	}
+	if obcluster.Annotations[oceanbaseconst.AnnotationsSupportStaticIP] != "true" {
+		if r.Spec.Type == constants.ClusterOpTypeRestartOBServers && r.Spec.RestartOBServers != nil {
+			return nil, field.Invalid(field.NewPath("spec").Child("obcluster"), r.Spec.OBCluster, "obcluster does not support static ip, can not restart observers")
+		}
+		if r.Spec.Type == constants.ClusterOpTypeModifyOBServers && r.Spec.ModifyOBServers != nil {
+			if r.Spec.ModifyOBServers.Resource != nil ||
+				r.Spec.ModifyOBServers.AddingBackupVolume != nil ||
+				r.Spec.ModifyOBServers.AddingMonitor != nil ||
+				r.Spec.ModifyOBServers.RemoveMonitor {
+				return nil, field.Invalid(field.NewPath("spec").Child("obcluster"), r.Spec.OBCluster, "obcluster does not support static ip, can not modify observers")
+			}
+		}
+	}
+
+	return nil, nil
+}
+
+// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type
+func (r *OBClusterOperation) ValidateUpdate(_ runtime.Object) (admission.Warnings, error) {
+	warnings := []string{"Update to OBClusterOperation won't take effect."}
+
+	return warnings, nil
+}
+
+// ValidateDelete implements webhook.Validator so a webhook will be registered for the type
+func (r *OBClusterOperation) ValidateDelete() (admission.Warnings, error) {
+	obclusteroperationlog.Info("validate delete", "name", r.Name)
+
+	// TODO(user): fill in your validation logic upon object deletion.
+	return nil, nil
+}
diff --git a/api/v1alpha1/observer_types.go b/api/v1alpha1/observer_types.go
index a85a76293..4c0f39865 100644
--- a/api/v1alpha1/observer_types.go
+++ b/api/v1alpha1/observer_types.go
@@ -71,6 +71,8 @@ type OBServerStatus struct {
 //+kubebuilder:printcolumn:name="ClusterName",type="string",JSONPath=".spec.clusterName"
 //+kubebuilder:printcolumn:name="ZoneName",type="string",JSONPath=".spec.zone"
 //+kubebuilder:printcolumn:name="OBStatus",type="string",JSONPath=".status.obStatus",priority=1
+//+kubebuilder:printcolumn:name="Tasks",type="string",JSONPath=".status.operationContext.tasks",priority=1
+//+kubebuilder:printcolumn:name="Task",type="string",JSONPath=".status.operationContext.task",priority=1
 
 // OBServer is the Schema for the observers API
 type OBServer struct {
diff --git a/api/v1alpha1/obtenantbackuppolicy_types.go b/api/v1alpha1/obtenantbackuppolicy_types.go
index 5de2cf813..bf8092f3c 100644
--- a/api/v1alpha1/obtenantbackuppolicy_types.go
+++ b/api/v1alpha1/obtenantbackuppolicy_types.go
@@ -110,6 +110,8 @@ func (in *OBTenantBackupPolicyStatus) DeepCopy() *OBTenantBackupPolicyStatus {
 //+kubebuilder:printcolumn:name="FullCrontab",type=string,JSONPath=`.spec.dataBackup.fullCrontab`
 //+kubebuilder:printcolumn:name="IncrementalCrontab",type=string,JSONPath=`.spec.dataBackup.incrementalCrontab`
 //+kubebuilder:resource:shortName=obtbp
+//+kubebuilder:printcolumn:name="Tasks",type="string",JSONPath=".status.operationContext.tasks",priority=1
+//+kubebuilder:printcolumn:name="Task",type="string",JSONPath=".status.operationContext.task",priority=1
 
 // OBTenantBackupPolicy is the Schema for the obtenantbackuppolicies API
 type OBTenantBackupPolicy struct {
diff --git a/api/v1alpha1/obtenantbackuppolicy_webhook.go b/api/v1alpha1/obtenantbackuppolicy_webhook.go
index f86691987..c63a361f0 100644
--- a/api/v1alpha1/obtenantbackuppolicy_webhook.go
+++ b/api/v1alpha1/obtenantbackuppolicy_webhook.go
@@ -87,6 +87,9 @@ func (r *OBTenantBackupPolicy) Default() {
 	if r.Spec.LogArchive.Destination.Type == constants.BackupDestTypeOSS {
 		r.Spec.LogArchive.Destination.Path = strings.ReplaceAll(r.Spec.LogArchive.Destination.Path, "/?", "?")
 	}
+	if r.Labels == nil {
+		r.Labels = make(map[string]string)
+	}
 	if r.Spec.TenantCRName != "" {
 		tenant := &OBTenant{}
 		err := bakClt.Get(context.Background(), types.NamespacedName{
@@ -110,12 +113,10 @@ func (r *OBTenantBackupPolicy) Default() {
 			BlockOwnerDeletion: &blockOwnerDeletion,
 		}})
 
-		r.SetLabels(map[string]string{
-			oceanbaseconst.LabelTenantName: r.Spec.TenantCRName,
-			oceanbaseconst.LabelRefOBCluster: r.Spec.ObClusterName,
-			oceanbaseconst.LabelRefUID: string(tenant.GetObjectMeta().GetUID()),
-		})
+		r.Labels[oceanbaseconst.LabelTenantName] = r.Spec.TenantCRName
+		r.Labels[oceanbaseconst.LabelRefUID] = string(tenant.GetObjectMeta().GetUID())
 	}
+	r.Labels[oceanbaseconst.LabelRefOBCluster] = r.Spec.ObClusterName
 }
 
 // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation.
@@ -248,6 +249,13 @@ func (r *OBTenantBackupPolicy) validateBackupPolicy() error {
 		return field.Invalid(field.NewPath("spec").Child("dataBackup").Child("destination").Child("type"), r.Spec.DataBackup.Destination.Type, "invalid destination type, only NFS and OSS are supported")
 	}
 
+	if r.Spec.DataBackup.Destination.Type == constants.BackupDestTypeNFS ||
+		r.Spec.LogArchive.Destination.Type == constants.BackupDestTypeNFS {
+		if cluster.Spec.BackupVolume == nil {
+			return field.Invalid(field.NewPath("spec").Child("clusterName"), r.Spec.ObClusterName, "backupVolume of obcluster is required when using NFS")
+		}
+	}
+
 	// Check oss access of destinations
 	if r.Spec.DataBackup.Destination.Type == constants.BackupDestTypeOSS && r.Spec.DataBackup.Destination.OSSAccessSecret != "" {
 		if !ossPathPattern.MatchString(r.Spec.DataBackup.Destination.Path) {
diff --git a/api/v1alpha1/obzone_types.go b/api/v1alpha1/obzone_types.go
index ae7b02ff4..06e1ff86d 100644
--- a/api/v1alpha1/obzone_types.go
+++ b/api/v1alpha1/obzone_types.go
@@ -58,6 +58,8 @@ type OBZoneStatus struct {
 //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 //+kubebuilder:printcolumn:name="ClusterName",type="string",JSONPath=".spec.clusterName"
 //+kubebuilder:printcolumn:name="ZoneName",type="string",JSONPath=".spec.topology.zone"
+//+kubebuilder:printcolumn:name="Tasks",type="string",JSONPath=".status.operationContext.tasks",priority=1
+//+kubebuilder:printcolumn:name="Task",type="string",JSONPath=".status.operationContext.task",priority=1
 
 // OBZone is the Schema for the obzones API
 type OBZone struct {
diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go
index 3a1e80942..9926ecb2a 100644
--- a/api/v1alpha1/webhook_suite_test.go
+++ b/api/v1alpha1/webhook_suite_test.go
@@ -127,6 +127,9 @@ var _ = BeforeSuite(func() {
 	err = (&OBResourceRescue{}).SetupWebhookWithManager(mgr)
 	Expect(err).NotTo(HaveOccurred())
 
+	err = (&OBClusterOperation{}).SetupWebhookWithManager(mgr)
+	Expect(err).NotTo(HaveOccurred())
+
 	//+kubebuilder:scaffold:webhook
 
 	go func() {
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 248b0bb9c..68be3d1b1 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -26,6 +26,26 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlterZoneReplicas) DeepCopyInto(out *AlterZoneReplicas) {
+	*out = *in
+	if in.Zones != nil {
+		in, out := &in.Zones, &out.Zones
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlterZoneReplicas.
+func (in *AlterZoneReplicas) DeepCopy() *AlterZoneReplicas {
+	if in == nil {
+		return nil
+	}
+	out := new(AlterZoneReplicas)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CleanPolicy) DeepCopyInto(out *CleanPolicy) {
 	*out = *in
@@ -57,6 +77,36 @@ func (in *DataBackupConfig) DeepCopy() *DataBackupConfig {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExpandStorageSizeConfig) DeepCopyInto(out *ExpandStorageSizeConfig) { + *out = *in + if in.DataStorage != nil { + in, out := &in.DataStorage, &out.DataStorage + x := (*in).DeepCopy() + *out = &x + } + if in.LogStorage != nil { + in, out := &in.LogStorage, &out.LogStorage + x := (*in).DeepCopy() + *out = &x + } + if in.RedoLogStorage != nil { + in, out := &in.RedoLogStorage, &out.RedoLogStorage + x := (*in).DeepCopy() + *out = &x + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpandStorageSizeConfig. +func (in *ExpandStorageSizeConfig) DeepCopy() *ExpandStorageSizeConfig { + if in == nil { + return nil + } + out := new(ExpandStorageSizeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalityType) DeepCopyInto(out *LocalityType) { *out = *in @@ -103,6 +153,58 @@ func (in *MigrateServerStatus) DeepCopy() *MigrateServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyOBServersConfig) DeepCopyInto(out *ModifyOBServersConfig) { + *out = *in + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = (*in).DeepCopy() + } + if in.ExpandStorageSize != nil { + in, out := &in.ExpandStorageSize, &out.ExpandStorageSize + *out = new(ExpandStorageSizeConfig) + (*in).DeepCopyInto(*out) + } + if in.ModifyStorageClass != nil { + in, out := &in.ModifyStorageClass, &out.ModifyStorageClass + *out = new(ModifyStorageClassConfig) + **out = **in + } + if in.AddingMonitor != nil { + in, out := &in.AddingMonitor, &out.AddingMonitor + *out = (*in).DeepCopy() + } + if in.AddingBackupVolume != nil { + in, out := &in.AddingBackupVolume, &out.AddingBackupVolume + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyOBServersConfig. +func (in *ModifyOBServersConfig) DeepCopy() *ModifyOBServersConfig { + if in == nil { + return nil + } + out := new(ModifyOBServersConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModifyStorageClassConfig) DeepCopyInto(out *ModifyStorageClassConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyStorageClassConfig. +func (in *ModifyStorageClassConfig) DeepCopy() *ModifyStorageClassConfig { + if in == nil { + return nil + } + out := new(ModifyStorageClassConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OBCluster) DeepCopyInto(out *OBCluster) { *out = *in @@ -162,6 +264,170 @@ func (in *OBClusterList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OBClusterOperation) DeepCopyInto(out *OBClusterOperation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OBClusterOperation. 
+func (in *OBClusterOperation) DeepCopy() *OBClusterOperation { + if in == nil { + return nil + } + out := new(OBClusterOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OBClusterOperation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OBClusterOperationList) DeepCopyInto(out *OBClusterOperationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OBClusterOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OBClusterOperationList. +func (in *OBClusterOperationList) DeepCopy() *OBClusterOperationList { + if in == nil { + return nil + } + out := new(OBClusterOperationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OBClusterOperationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OBClusterOperationSpec) DeepCopyInto(out *OBClusterOperationSpec) { + *out = *in + if in.AddZones != nil { + in, out := &in.AddZones, &out.AddZones + *out = make([]types.OBZoneTopology, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeleteZones != nil { + in, out := &in.DeleteZones, &out.DeleteZones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdjustReplicas != nil { + in, out := &in.AdjustReplicas, &out.AdjustReplicas + *out = make([]AlterZoneReplicas, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RestartOBServers != nil { + in, out := &in.RestartOBServers, &out.RestartOBServers + *out = new(RestartOBServersConfig) + (*in).DeepCopyInto(*out) + } + if in.Upgrade != nil { + in, out := &in.Upgrade, &out.Upgrade + *out = new(UpgradeConfig) + **out = **in + } + if in.ModifyOBServers != nil { + in, out := &in.ModifyOBServers, &out.ModifyOBServers + *out = new(ModifyOBServersConfig) + (*in).DeepCopyInto(*out) + } + if in.SetParameters != nil { + in, out := &in.SetParameters, &out.SetParameters + *out = make([]types.Parameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OBClusterOperationSpec. +func (in *OBClusterOperationSpec) DeepCopy() *OBClusterOperationSpec { + if in == nil { + return nil + } + out := new(OBClusterOperationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OBClusterOperationStatus) DeepCopyInto(out *OBClusterOperationStatus) { + *out = *in + if in.OperationContext != nil { + in, out := &in.OperationContext, &out.OperationContext + *out = (*in).DeepCopy() + } + if in.ClusterSnapshot != nil { + in, out := &in.ClusterSnapshot, &out.ClusterSnapshot + *out = new(OBClusterSnapshot) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OBClusterOperationStatus. +func (in *OBClusterOperationStatus) DeepCopy() *OBClusterOperationStatus { + if in == nil { + return nil + } + out := new(OBClusterOperationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OBClusterSnapshot) DeepCopyInto(out *OBClusterSnapshot) { + *out = *in + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(OBClusterSpec) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(OBClusterStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OBClusterSnapshot. +func (in *OBClusterSnapshot) DeepCopy() *OBClusterSnapshot { + if in == nil { + return nil + } + out := new(OBClusterSnapshot) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OBClusterSpec) DeepCopyInto(out *OBClusterSpec) { *out = *in @@ -1221,6 +1487,31 @@ func (in *ResourcePoolStatus) DeepCopy() *ResourcePoolStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestartOBServersConfig) DeepCopyInto(out *RestartOBServersConfig) { + *out = *in + if in.OBServers != nil { + in, out := &in.OBServers, &out.OBServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OBZones != nil { + in, out := &in.OBZones, &out.OBZones + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartOBServersConfig. +func (in *RestartOBServersConfig) DeepCopy() *RestartOBServersConfig { + if in == nil { + return nil + } + out := new(RestartOBServersConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSourceSpec) DeepCopyInto(out *RestoreSourceSpec) { *out = *in @@ -1381,3 +1672,18 @@ func (in *UnitStatus) DeepCopy() *UnitStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradeConfig) DeepCopyInto(out *UpgradeConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradeConfig. 
+func (in *UpgradeConfig) DeepCopy() *UpgradeConfig { + if in == nil { + return nil + } + out := new(UpgradeConfig) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 58d3f09a0..0728e2bdf 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -213,6 +213,14 @@ func main() { setupLog.Error(err, "Unable to create controller", "controller", "OBResourceRescue") os.Exit(1) } + if err = (&controller.OBClusterOperationReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: telemetry.NewRecorder(ctx, mgr.GetEventRecorderFor(config.OBClusterOperationControllerName)), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OBClusterOperation") + os.Exit(1) + } if os.Getenv("DISABLE_WEBHOOKS") != "true" { if err = (&v1alpha1.OBTenantBackupPolicy{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "Unable to create webhook", "webhook", "OBTenantBackupPolicy") @@ -234,6 +242,10 @@ func main() { setupLog.Error(err, "Unable to create webhook", "webhook", "OBResourceRescue") os.Exit(1) } + if err = (&v1alpha1.OBClusterOperation{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "OBClusterOperation") + os.Exit(1) + } } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/oceanbase.oceanbase.com_obclusteroperations.yaml b/config/crd/bases/oceanbase.oceanbase.com_obclusteroperations.yaml new file mode 100644 index 000000000..573a0c6b6 --- /dev/null +++ b/config/crd/bases/oceanbase.oceanbase.com_obclusteroperations.yaml @@ -0,0 +1,5868 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + name: obclusteroperations.oceanbase.oceanbase.com +spec: + group: oceanbase.oceanbase.com + names: + kind: OBClusterOperation + listKind: OBClusterOperationList + plural: obclusteroperations + singular: obclusteroperation + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.type + name: Type + type: string + - jsonPath: .status.status + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.operationContext.tasks + name: Tasks + priority: 1 + type: string + - jsonPath: .status.operationContext.task + name: Task + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: OBClusterOperation is the Schema for the obclusteroperations + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OBClusterOperationSpec defines the desired state of OBClusterOperation + properties: + addZones: + items: + properties: + affinity: + description: Affinity is a group of affinity scheduling rules. 
+ properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a + no-op). A null preferred scheduling term matches + no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range + 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an + update), the system may or may not try to eventually + evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term + matches no objects. The requirements of them + are ANDed. The TopologySelectorTerm type implements + a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators + are In, NotIn, Exists, DoesNotExist. + Gt, and Lt. + type: string + values: + description: An array of string values. + If the operator is In or NotIn, the + values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. If + the operator is Gt or Lt, the values + array must have a single element, + which will be interpreted as an integer. + This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the affinity expressions specified + by this field, but it may choose a node that violates + one or more of the expressions. The node that is most + preferred is the one with the greatest sum of weights, + i.e. 
for each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum by iterating + through the elements of this field and adding "weight" + to the sum if the node has pods which matches the + corresponding podAffinityTerm; the node(s) with the + highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of + namespaces that the term applies to. The + term is applied to the union of the namespaces + selected by this field and the ones listed + in the namespaces field. null selector and + null or empty namespaces list means "this + pod's namespace". An empty selector ({}) + matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range + 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a + pod label update), the system may or may not try to + eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods + to nodes that satisfy the anti-affinity expressions + specified by this field, but it may choose a node + that violates one or more of the expressions. The + node that is most preferred is the one with the greatest + sum of weights, i.e. 
for each node that meets all + of the scheduling requirements (resource request, + requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements + of this field and adding "weight" to the sum if the + node has pods which matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of + namespaces that the term applies to. The + term is applied to the union of the namespaces + selected by this field and the ones listed + in the namespaces field. null selector and + null or empty namespaces list means "this + pod's namespace". An empty selector ({}) + matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term applies + to. The term is applied to the union of + the namespaces listed in this field and + the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector + means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose value + of the label with key topologyKey matches + that of any node on which any of the selected + pods is running. Empty topologyKey is not + allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the + corresponding podAffinityTerm, in the range + 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified + by this field are not met at scheduling time, the + pod will not be scheduled onto the node. If the anti-affinity + requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a + pod label update), the system may or may not try to + eventually evict the pod from its node. When there + are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all + terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or + not co-located (anti-affinity) with, where co-located + is defined as running on a node whose value of the + label with key matches that of any + node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces + that the term applies to. The term is applied + to the union of the namespaces selected by this + field and the ones listed in the namespaces + field. null selector and null or empty namespaces + list means "this pod's namespace". An empty + selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, a + key, and an operator that relates the + key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only + "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list + of namespace names that the term applies to. + The term is applied to the union of the namespaces + listed in this field and the ones selected by + namespaceSelector. null or empty namespaces + list and null namespaceSelector means "this + pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified + namespaces, where co-located is defined as running + on a node whose value of the label with key + topologyKey matches that of any node on which + any of the selected pods is running. Empty topologyKey + is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + replica: + type: integer + tolerations: + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. 
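# --- Editorial sketch (not generated content) ---
# The scheduling fields above (pod affinity/anti-affinity, nodeSelector,
# replica, tolerations, zone) describe one entry of the zone list being added.
# A minimal example of such an entry is sketched below; the zone name, label
# keys and taint values are placeholders, and the enclosing field name
# "affinity" is assumed from the surrounding schema.
#
# - zone: zone4
#   replica: 1
#   nodeSelector:
#     topology.kubernetes.io/zone: zone4
#   affinity:
#     podAntiAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#       - labelSelector:
#           matchLabels:
#             ref-obcluster: obcluster-sample   # placeholder label
#         topologyKey: kubernetes.io/hostname
#   tolerations:
#   - key: dedicated
#     operator: Equal
#     value: oceanbase
#     effect: NoSchedule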
+ type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + zone: + type: string + required: + - replica + - zone + type: object + type: array + adjustReplicas: + items: + properties: + to: + type: integer + zones: + items: + type: string + type: array + required: + - zones + type: object + type: array + deleteZones: + items: + type: string + type: array + force: + type: boolean + modifyOBServers: + properties: + addingBackupVolume: + properties: + volume: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume + that you want to mount. If omitted, the default + is to mount by volume name. Examples: For volume + /dev/sda1, you specify the partition as "1". Similarly, + the volume partition for /dev/sda is "0" (or you + can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly + setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent + disk resource in AWS (Amazon EBS volume). More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". 
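# --- Editorial sketch (not generated content) ---
# The operation-level fields defined above can be combined into a spec. The
# examples below use only fields shown in this schema (adjustReplicas,
# deleteZones, force); zone names are placeholders and the surrounding
# OBClusterOperation wrapper is omitted.
#
# Adjust the replica count of existing zones:
#   adjustReplicas:
#   - zones: [zone1, zone2, zone3]
#     to: 2
#
# Or remove a zone, optionally forcing the operation:
#   force: false
#   deleteZones:
#   - zone4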
Implicitly inferred + to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in + VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in + VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a + collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is + /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false + (read/write). ReadOnly here will force the ReadOnly + setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is + the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados + user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached + and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in + VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret + object containing parameters used to connect to + OpenStack.' 
+ properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. If + a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked + optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file. Must be an + octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both + octal and decimal values, JSON requires decimal + values for mode bits. If not specified, the + volume defaultMode will be used. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the + file to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. Consult with your admin + for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the + associated CSI driver which will determine the default + filesystem to apply. 
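# --- Editorial sketch (not generated content) ---
# Syntax of the configMap volume source defined above, for the core/v1 Volume
# embedded in this schema. The ConfigMap name, key and paths are placeholders.
#
# volume:
#   name: observer-extra-config
#   configMap:
#     name: observer-config        # placeholder ConfigMap name
#     defaultMode: 0644
#     items:
#     - key: custom.conf
#       path: custom.conf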
+ type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to + the secret object containing sensitive information + to pass to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the + secret object contains more than one secret, all + secret references are passed. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific + properties that are passed to the CSI driver. Consult + your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits + used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to set + permissions on this file, must be an octal + value between 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts both octal + and decimal values, JSON requires decimal + values for mode bits. If not specified, the + volume defaultMode will be used. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path. + Must be utf-8 encoded. The first item of the + relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
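# --- Editorial sketch (not generated content) ---
# Syntax of the downwardAPI volume source described above: each item maps a
# pod field (or container resource) to a file. Paths and names are placeholders.
#
# volume:
#   name: podinfo
#   downwardAPI:
#     items:
#     - path: labels
#       fieldRef:
#         fieldPath: metadata.labels
#     - path: cpu_limit
#       resourceFieldRef:
#         containerName: observer    # placeholder container name
#         resource: limits.cpu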
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory + that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. The default is + "" which means to use the node''s default medium. + Must be an empty string (default) or Memory. More + info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local + storage required for this EmptyDir volume. The size + limit is also applicable for memory medium. The + maximum usage on memory medium EmptyDir would be + the minimum value between the SizeLimit specified + here and the sum of memory limits of all containers + in a pod. The default is nil which means that the + limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle + is tied to the pod that defines it - it will be created + before the pod starts, and deleted when the pod is removed. + \n Use this if: a) the volume is only needed while the + pod runs, b) features of normal volumes like restoring + from snapshot or capacity tracking are needed, c) the + storage driver is specified through a storage class, + and d) the storage driver supports dynamic volume provisioning + through a PersistentVolumeClaim (see EphemeralVolumeSource + for more information on the connection between this + volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that + persist for longer than the lifecycle of an individual + pod. \n Use CSI for light-weight local ephemeral volumes + if the CSI driver is meant to be used that way - see + the documentation of the driver for more information. + \n A pod can use both types of ephemeral volumes and + persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone + PVC to provision the volume. The pod in which this + EphemeralVolumeSource is embedded will be the owner + of the PVC, i.e. the PVC will be deleted together + with the pod. The name of the PVC will be `-` where `` is the + name from the `PodSpec.Volumes` array entry. Pod + validation will reject the pod if the concatenated + name is not valid for a PVC (for example, too long). + \n An existing PVC with that name that is not owned + by the pod will *not* be used for the pod to avoid + using an unrelated volume by mistake. 
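# --- Editorial sketch (not generated content) ---
# Syntax of the emptyDir volume source defined above, with an optional memory
# medium and size limit. Values are placeholders.
#
# volume:
#   name: scratch
#   emptyDir:
#     medium: Memory
#     sizeLimit: 1Gi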
Starting the + pod is then blocked until the unrelated PVC is removed. + If such a pre-created PVC is meant to be used by + the pod, the PVC has to updated with an owner reference + to the pod once the pod exists. Normally this should + not be necessary, but it may be useful when manually + reconstructing a broken cluster. \n This field is + read-only and no changes will be made by Kubernetes + to the PVC after it has been created. \n Required, + must not be nil." + properties: + metadata: + description: May contain labels and annotations + that will be copied into the PVC when creating + it. No other fields are allowed and will be + rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into + the PVC that gets created from this template. + The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired + access modes the volume should have. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used + to specify either: * An existing VolumeSnapshot + object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller + can support the specified data source, it + will create a new volume based on the contents + of the specified data source. When the AnyVolumeDataSource + feature gate is enabled, dataSource contents + will be copied to dataSourceRef, and dataSourceRef + contents will be copied to dataSource when + dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef + will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If APIGroup + is not specified, the specified Kind + must be in the core API group. For any + other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the + object from which to populate the volume + with data, if a non-empty volume is desired. + This may be any object from a non-empty + API group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume + binding will only succeed if the type of + the specified object matches some installed + volume populator or dynamic provisioner. + This field will replace the functionality + of the dataSource field and as such if both + fields are non-empty, they must have the + same value. For backwards compatibility, + when namespace isn''t specified in dataSourceRef, + both fields (dataSource and dataSourceRef) + will be set to the same value automatically + if one of them is empty and the other is + non-empty. When namespace is specified in + dataSourceRef, dataSource isn''t set to + the same value and must be empty. There + are three important differences between + dataSource and dataSourceRef: * While dataSource + only allows two specific types of objects, + dataSourceRef allows any non-core object, + as well as PersistentVolumeClaim objects. 
+ * While dataSource ignores disallowed values + (dropping them), dataSourceRef preserves + all values, and generates an error if a + disallowed value is specified. * While dataSource + only allows local objects, dataSourceRef + allows objects in any namespaces. (Beta) + Using this field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) Using + the namespace field of dataSourceRef requires + the CrossNamespaceVolumeDataSource feature + gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. If APIGroup + is not specified, the specified Kind + must be in the core API group. For any + other third-party types, APIGroup is + required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: Namespace is the namespace + of resource being referenced Note that + when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent namespace + to allow that namespace's owner to accept + the reference. See the ReferenceGrant + documentation for details. (Alpha) This + field requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum + resources the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to + specify resource requirements that are lower + than previous value but must still be higher + than capacity recorded in the status field + of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of + resources, defined in spec.resourceClaims, + that are used by this container. \n + This is an alpha field and requires + enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the + name of one entry in pod.spec.resourceClaims + of the Pod where this field is + used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. + If Requests is omitted for a container, + it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents + a key's relationship to a set + of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values array + must be non-empty. If the operator + is Exists or DoesNotExist, the + values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name + of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. Value + of Filesystem is implied when not included + in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. TODO: how + do we prevent errors in the filesystem from compromising + the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false + (read/write). ReadOnly here will force the ReadOnly + setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide + identifiers (wwids) Either wwids or combination + of targetWWNs and lun must be set, but not both + simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. 
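# --- Editorial sketch (not generated content) ---
# Syntax of the ephemeral volume source defined above: a volumeClaimTemplate
# whose spec is an ordinary PVC spec. The storage class and size are
# placeholders.
#
# volume:
#   name: ephemeral-data
#   ephemeral:
#     volumeClaimTemplate:
#       spec:
#         accessModes: ["ReadWriteOnce"]
#         storageClassName: local-path      # placeholder StorageClass
#         resources:
#           requests:
#             storage: 50Gi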
+ Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". The + default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false + (read/write). ReadOnly here will force the ReadOnly + setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is + reference to the secret object containing sensitive + information to pass to the plugin scripts. This + may be empty if no secret object is specified. If + the secret object contains more than one secret, + all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored + as metadata -> name on the dataset for Flocker should + be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume + that you want to mount. Tip: Ensure that the filesystem + type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume + that you want to mount. If omitted, the default + is to mount by volume name. Examples: For volume + /dev/sda1, you specify the partition as "1". Similarly, + the volume partition for /dev/sda is "0" (or you + can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource + in GCE. Used to identify the disk in GCE. More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More + info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a + particular revision. DEPRECATED: GitRepo is deprecated. + To provision a container with a git repo, mount an EmptyDir + into an InitContainer that clones the repo using git, + then mount the EmptyDir into the Pod''s container.' 
+ properties: + directory: + description: directory is the target directory name. + Must not contain or start with '..'. If '.' is + supplied, the volume directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on + the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More + info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs + volume to be mounted with read-only permissions. + Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file + or directory on the host machine that is directly exposed + to the container. This is generally used for system + agents or other privileged things that are allowed to + see the host machine. Most containers will NOT need + this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host + directory mounts and who can/can not mount host directories + as read/write.' + properties: + path: + description: 'path of the directory on the host. If + the path is a symlink, it will follow the link to + the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to + "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource + that is attached to a kubelet''s host machine and then + exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. If initiatorName is specified with iscsiInterface + simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. 
+ type: string + iscsiInterface: + description: iscsiInterface is the interface Name + that uses an iSCSI transport. Defaults to 'default' + (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. + The portal is either an IP or ip_addr:port if the + port is other than default (typically TCP ports + 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + The Portal is either an IP or ip_addr:port if the + port is other than default (typically TCP ports + 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host + that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export + to be mounted with read-only permissions. Defaults + to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting + in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. 
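# --- Editorial sketch (not generated content) ---
# A plausible use of the nfs / persistentVolumeClaim sources defined above for
# modifyOBServers.addingBackupVolume: mounting shared storage for backups.
# The server address, path and claim name are placeholders.
#
# modifyOBServers:
#   addingBackupVolume:
#     volume:
#       name: ob-backup
#       nfs:
#         server: 1.1.1.1            # placeholder NFS server
#         path: /opt/nfs
#         readOnly: false
#
# or, backed by an existing claim:
#       persistentVolumeClaim:
#         claimName: ob-backup-pvc   # placeholder PVC name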
+ type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type + to mount Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in + VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to + set permissions on created files by default. Must + be an octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both octal + and decimal values, JSON requires decimal values + for mode bits. Directories within the path are not + affected by this setting. This might be in conflict + with other options that affect the file mode, like + fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of the + referenced ConfigMap will be projected + into the volume as a file whose name is + the key and content is the value. If specified, + the listed keys will be projected into + the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the ConfigMap, + the volume setup will error unless it + is marked optional. Paths must be relative + and may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on + this file. Must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a + field of the pod: only annotations, + labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in + terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified API + version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value between + 0000 and 0777 or a decimal value + between 0 and 511. YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of + the container: only resources limits + and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of the + referenced Secret will be projected into + the volume as a file whose name is the + key and content is the value. If specified, + the listed keys will be projected into + the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the Secret, the + volume setup will error unless it is marked + optional. Paths must be relative and may + not contain the '..' path or start with + '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on + this file. Must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. 
YAML accepts + both octal and decimal values, JSON + requires decimal values for mode + bits. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options + that affect the file mode, like + fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the key + to. May not be an absolute path. + May not contain the path element + '..'. May not start with the string + '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. A recipient of a token must + identify itself with an identifier specified + in the audience of the token, and otherwise + should reject the token. The audience + defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, + the kubelet volume plugin will proactively + rotate the service account token. The + kubelet will start trying to rotate the + token if the token is older than 80 percent + of its time to live or if the token is + older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to + the mount point of the file to project + the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default + is no group + type: string + readOnly: + description: readOnly here will force the Quobyte + volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple + Quobyte Registry services specified as a string + as host:port pair (multiple entries are separated + with commas) which acts as the central registry + for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume + in the Backend Used with dynamically provisioned + Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults + to serivceaccount user + type: string + volume: + description: volume is a string that references an + already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount + on the host that shares a pod''s lifetime. More info: + https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host operating + system. Examples: "ext4", "xfs", "ntfs". 
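# --- Editorial sketch (not generated content) ---
# Syntax of the projected volume source defined above, combining a Secret and
# a bound service-account token in one mount. Names are placeholders.
#
# volume:
#   name: projected-creds
#   projected:
#     sources:
#     - secret:
#         name: backup-oss-secret    # placeholder Secret name
#     - serviceAccountToken:
#         path: token
#         expirationSeconds: 3600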
Implicitly + inferred to be "ext4" if unspecified. More info: + https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + image: + description: 'image is the rados image name. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for + RBDUser. Default is /etc/ceph/keyring. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default + is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default + is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Default + is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in + VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for + ScaleIO user and other sensitive information. If + this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. 
+ type: string + volumeName: + description: volumeName is the name of a volume already + created in the ScaleIO system that is associated + with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires decimal + values for mode bits. Defaults to 0644. Directories + within the path are not affected by this setting. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file whose + name is the key and content is the value. If specified, + the listed keys will be projected into the specified + paths, and unlisted keys will not be present. If + a key is specified which is not present in the Secret, + the volume setup will error unless it is marked + optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file. Must be an + octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both + octal and decimal values, JSON requires decimal + values for mode bits. If not specified, the + volume defaultMode will be used. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the + file to map the key to. May not be an absolute + path. May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in + VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use + for obtaining the StorageOS API credentials. If + not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable name + of the StorageOS volume. Volume names are only + unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of + the volume within StorageOS. If no namespace is + specified then the Pod's namespace will be used. This + allows the Kubernetes name scoping to be mirrored + within StorageOS for tighter integration. Set VolumeName + to any name to override the default behaviour. Set + to "default" if you are not using namespaces within + StorageOS. Namespaces that do not pre-exist within + StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must + be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated with + the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + required: + - volume + type: object + addingMonitor: + properties: + image: + type: string + resource: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - memory + type: object + required: + - image + - resource + type: object + expandStorageSize: + properties: + dataStorage: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + logStorage: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + redoLogStorage: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + modifyStorageClass: + properties: + dataStorage: + type: string + logStorage: + type: string + redoLogStorage: + type: string + type: object + removeBackupVolume: + type: boolean + removeMonitor: + type: boolean + resource: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - 
type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - memory + type: object + type: object + obcluster: + type: string + restartOBServers: + properties: + all: + type: boolean + observers: + items: + type: string + type: array + obzones: + items: + type: string + type: array + type: object + setParameters: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + ttlDays: + default: 7 + type: integer + type: + type: string + upgrade: + properties: + image: + type: string + required: + - image + type: object + required: + - obcluster + - type + type: object + status: + description: OBClusterOperationStatus defines the observed state of OBClusterOperation + properties: + clusterSnapshot: + properties: + spec: + description: OBClusterSpec defines the desired state of OBCluster + properties: + backupVolume: + properties: + volume: + description: Volume represents a named volume in a pod + that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS + Disk resource that is attached to a kubelet''s host + machine and then exposed to the pod. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of + the volume that you want to mount. Tip: Ensure + that the filesystem type is supported by the + host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if + unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the + volume that you want to mount. If omitted, the + default is to mount by volume name. Examples: + For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property + empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the + readOnly setting in VolumeMounts. More info: + https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent + disk resource in AWS (Amazon EBS volume). More + info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching + mode: None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data + disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. 
+ type: string + kind: + description: 'kind expected values are Shared: + multiple blob disks per storage account Dedicated: + single blob disk per storage account Managed: + azure managed data disk (only in managed availability + set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret + that contains Azure Storage Account Name and + Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on + the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is + a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to + false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is + /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef + is reference to the authentication secret for + User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados + user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached + and mounted on kubelets host machine. More info: + https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to + mount. Must be a filesystem type supported by + the host operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to + a secret object containing parameters used to + connect to OpenStack.' + properties: + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that + should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. Consult with your + admin for the correct name as registered in + the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is + passed to the associated CSI driver which will + determine the default filesystem to apply. 
+ type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference + to the secret object containing sensitive information + to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if + no secret is required. If the secret object + contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration + for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific + properties that are passed to the CSI driver. + Consult your driver's documentation for supported + values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to + set permissions on this file, must be + an octal value between 0000 and 0777 or + a decimal value between 0 and 511. YAML + accepts both octal and decimal values, + JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict + with other options that affect the file + mode, like fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory + that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage + medium should back this directory. The default + is "" which means to use the node''s default + medium. Must be an empty string (default) or + Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of + local storage required for this EmptyDir volume. + The size limit is also applicable for memory + medium. The maximum usage on memory medium EmptyDir + would be the minimum value between the SizeLimit + specified here and the sum of memory limits + of all containers in a pod. The default is nil + which means that the limit is undefined. More + info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is + handled by a cluster storage driver. The volume's + lifecycle is tied to the pod that defines it - it + will be created before the pod starts, and deleted + when the pod is removed. \n Use this if: a) the + volume is only needed while the pod runs, b) features + of normal volumes like restoring from snapshot or + capacity tracking are needed, c) the storage driver + is specified through a storage class, and d) the + storage driver supports dynamic volume provisioning + through a PersistentVolumeClaim (see EphemeralVolumeSource + for more information on the connection between this + volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that + persist for longer than the lifecycle of an individual + pod. \n Use CSI for light-weight local ephemeral + volumes if the CSI driver is meant to be used that + way - see the documentation of the driver for more + information. \n A pod can use both types of ephemeral + volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone + PVC to provision the volume. The pod in which + this EphemeralVolumeSource is embedded will + be the owner of the PVC, i.e. the PVC will be + deleted together with the pod. The name of + the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` + array entry. Pod validation will reject the + pod if the concatenated name is not valid for + a PVC (for example, too long). 
\n An existing + PVC with that name that is not owned by the + pod will *not* be used for the pod to avoid + using an unrelated volume by mistake. Starting + the pod is then blocked until the unrelated + PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to + updated with an owner reference to the pod once + the pod exists. Normally this should not be + necessary, but it may be useful when manually + reconstructing a broken cluster. \n This field + is read-only and no changes will be made by + Kubernetes to the PVC after it has been created. + \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations + that will be copied into the PVC when creating + it. No other fields are allowed and will + be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into + the PVC that gets created from this template. + The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: 'accessModes contains the + desired access modes the volume should + have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be + used to specify either: * An existing + VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller + can support the specified data source, + it will create a new volume based on + the contents of the specified data source. + When the AnyVolumeDataSource feature + gate is enabled, dataSource contents + will be copied to dataSourceRef, and + dataSourceRef contents will be copied + to dataSource when dataSourceRef.namespace + is not specified. If the namespace is + specified, then dataSourceRef will not + be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group + for the resource being referenced. + If APIGroup is not specified, the + specified Kind must be in the core + API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies + the object from which to populate the + volume with data, if a non-empty volume + is desired. This may be any object from + a non-empty API group (non core object) + or a PersistentVolumeClaim object. When + this field is specified, volume binding + will only succeed if the type of the + specified object matches some installed + volume populator or dynamic provisioner. + This field will replace the functionality + of the dataSource field and as such + if both fields are non-empty, they must + have the same value. For backwards compatibility, + when namespace isn''t specified in dataSourceRef, + both fields (dataSource and dataSourceRef) + will be set to the same value automatically + if one of them is empty and the other + is non-empty. When namespace is specified + in dataSourceRef, dataSource isn''t + set to the same value and must be empty. 
+ There are three important differences + between dataSource and dataSourceRef: + * While dataSource only allows two specific + types of objects, dataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While dataSource ignores + disallowed values (dropping them), dataSourceRef + preserves all values, and generates + an error if a disallowed value is specified. + * While dataSource only allows local + objects, dataSourceRef allows objects + in any namespaces. (Beta) Using this + field requires the AnyVolumeDataSource + feature gate to be enabled. (Alpha) + Using the namespace field of dataSourceRef + requires the CrossNamespaceVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group + for the resource being referenced. + If APIGroup is not specified, the + specified Kind must be in the core + API group. For any other third-party + types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: Namespace is the namespace + of resource being referenced Note + that when a namespace is specified, + a gateway.networking.k8s.io/ReferenceGrant + object is required in the referent + namespace to allow that namespace's + owner to accept the reference. See + the ReferenceGrant documentation + for details. (Alpha) This field + requires the CrossNamespaceVolumeDataSource + feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the + minimum resources the volume should + have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed + to specify resource requirements that + are lower than previous value but must + still be higher than capacity recorded + in the status field of the claim. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names + of resources, defined in spec.resourceClaims, + that are used by this container. + \n This is an alpha field and requires + enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. + It can only be set for containers." + items: + description: ResourceClaim references + one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match + the name of one entry in pod.spec.resourceClaims + of the Pod where this field + is used. It makes that resource + available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the + maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the + minimum amount of compute resources + required. 
If Requests is omitted + for a container, it defaults to + Limits if that is explicitly specified, + otherwise to an implementation-defined + value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query + over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the + name of the StorageClass required by + the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. + Value of Filesystem is implied when + not included in claim spec. + type: string + volumeName: + description: volumeName is the binding + reference to the PersistentVolume backing + this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and + then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to + mount. Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if + unspecified. TODO: how do we prevent errors + in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to + false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world + wide identifiers (wwids) Either wwids or combination + of targetWWNs and lun must be set, but not both + simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume + resource that is provisioned/attached using an exec + based plugin. 
+ properties: + driver: + description: driver is the name of the driver + to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to + mount. Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs", + "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field + holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to + false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef + is reference to the secret object containing + sensitive information to pass to the plugin + scripts. This may be empty if no secret object + is specified. If the secret object contains + more than one secret, all secrets are passed + to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the + Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset + stored as metadata -> name on the dataset for + Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the + volume that you want to mount. Tip: Ensure that + the filesystem type is supported by the host + operating system. Examples: "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + partition: + description: 'partition is the partition in the + volume that you want to mount. If omitted, the + default is to mount by volume name. Examples: + For volume /dev/sda1, you specify the partition + as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property + empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD + resource in GCE. Used to identify the disk in + GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository + at a particular revision. DEPRECATED: GitRepo is + deprecated. 
To provision a container with a git + repo, mount an EmptyDir into an InitContainer that + clones the repo using git, then mount the EmptyDir + into the Pod''s container.' + properties: + directory: + description: directory is the target directory + name. Must not contain or start with '..'. If + '.' is supplied, the volume directory will be + the git repository. Otherwise, if specified, + the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount + on the host that shares a pod''s lifetime. More + info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs + volume to be mounted with read-only permissions. + Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file + or directory on the host machine that is directly + exposed to the container. This is generally used + for system agents or other privileged things that + are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use + host directory mounts and who can/can not mount + host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. + If the path is a symlink, it will follow the + link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults + to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource + that is attached to a kubelet''s host machine and + then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether + support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of + the volume that you want to mount. Tip: Ensure + that the filesystem type is supported by the + host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if + unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI + Initiator Name. 
If initiatorName is specified + with iscsiInterface simultaneously, new iSCSI + interface : will + be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified + Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name + that uses an iSCSI transport. Defaults to 'default' + (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal + List. The portal is either an IP or ip_addr:port + if the port is other than default (typically + TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for + iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. + The Portal is either an IP or ip_addr:port if + the port is other than default (typically TCP + ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL + and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host + that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS + server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS + export to be mounted with read-only permissions. + Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address + of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same + namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this + volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly + setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets + host machine + properties: + fsType: + description: fsType is the filesystem type to + mount. Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if + unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx + volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem + type to mount Must be a filesystem type supported + by the host operating system. Ex. "ext4", "xfs". + Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + Must be an octal value between 0000 and 0777 + or a decimal value between 0 and 511. YAML accepts + both octal and decimal values, JSON requires + decimal values for mode bits. Directories within + the path are not affected by this setting. This + might be in conflict with other options that + affect the file mode, like fsGroup, and the + result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + configMap: + description: configMap information about + the configMap data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced ConfigMap will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will + be projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not + present in the ConfigMap, the volume + setup will error unless it is marked + optional. Paths must be relative and + may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key to + project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 + or a decimal value between 0 + and 511. YAML accepts both octal + and decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume + defaultMode will be used. This + might be in conflict with other + options that affect the file + mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the + key to. May not be an absolute + path. May not contain the path + element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' 
+ type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is + written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits + used to set permissions on this + file, must be an octal value + between 0000 and 0777 or a decimal + value between 0 and 511. YAML + accepts both octal and decimal + values, JSON requires decimal + values for mode bits. If not + specified, the volume defaultMode + will be used. This might be + in conflict with other options + that affect the file mode, like + fsGroup, and the result can + be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. + Must be utf-8 encoded. The first + item of the relative path must + not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, requests.cpu + and requests.memory) are currently + supported.' + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the + output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: items if unspecified, each + key-value pair in the Data field of + the referenced Secret will be projected + into the volume as a file whose name + is the key and content is the value. + If specified, the listed keys will + be projected into the specified paths, + and unlisted keys will not be present. + If a key is specified which is not + present in the Secret, the volume + setup will error unless it is marked + optional. Paths must be relative and + may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to + a path within a volume. + properties: + key: + description: key is the key to + project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file. Must be an octal + value between 0000 and 0777 + or a decimal value between 0 + and 511. 
YAML accepts both octal + and decimal values, JSON requires + decimal values for mode bits. + If not specified, the volume + defaultMode will be used. This + might be in conflict with other + options that affect the file + mode, like fsGroup, and the + result can be other mode bits + set.' + format: int32 + type: integer + path: + description: path is the relative + path of the file to map the + key to. May not be an absolute + path. May not contain the path + element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: optional field specify + whether the Secret or its key must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to + project + properties: + audience: + description: audience is the intended + audience of the token. A recipient + of a token must identify itself with + an identifier specified in the audience + of the token, and otherwise should + reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the + requested duration of validity of + the service account token. As the + token approaches expiration, the kubelet + volume plugin will proactively rotate + the service account token. The kubelet + will start trying to rotate the token + if the token is older than 80 percent + of its time to live or if the token + is older than 24 hours.Defaults to + 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative + to the mount point of the file to + project the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on + the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default + is no group + type: string + readOnly: + description: readOnly here will force the Quobyte + volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: registry represents a single or multiple + Quobyte Registry services specified as a string + as host:port pair (multiple entries are separated + with commas) which acts as the central registry + for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume + in the Backend Used with dynamically provisioned + Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults + to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device + mount on the host that shares a pod''s lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of + the volume that you want to mount. Tip: Ensure + that the filesystem type is supported by the + host operating system. Examples: "ext4", "xfs", + "ntfs". 
Implicitly inferred to be "ext4" if + unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem + from compromising the machine' + type: string + image: + description: 'image is the rados image name. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring + for RBDUser. Default is /etc/ceph/keyring. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph + monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default + is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default + is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to + mount. Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs", + "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured + storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret + for ScaleIO user and other sensitive information. + If this is not provided, Login operation will + fail. + properties: + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the + storage for a volume should be ThickProvisioned + or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage + system as configured in ScaleIO. 
+ type: string + volumeName: + description: volumeName is the name of a volume + already created in the ScaleIO system that is + associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should + populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by + default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. + Defaults to 0644. Directories within the path + are not affected by this setting. This might + be in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file. + Must be an octal value between 0000 and + 0777 or a decimal value between 0 and + 511. YAML accepts both octal and decimal + values, JSON requires decimal values for + mode bits. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of + the file to map the key to. May not be + an absolute path. May not contain the + path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret + in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to + mount. Must be a filesystem type supported by + the host operating system. Ex. "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). + ReadOnly here will force the ReadOnly setting + in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to + use for obtaining the StorageOS API credentials. If + not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. 
More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable + name of the StorageOS volume. Volume names + are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. If no namespace + is specified then the Pod's namespace will be + used. This allows the Kubernetes name scoping + to be mirrored within StorageOS for tighter + integration. Set VolumeName to any name to override + the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will + be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. + Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage + Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + required: + - volume + type: object + clusterId: + format: int64 + type: integer + clusterName: + type: string + monitor: + properties: + image: + type: string + resource: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - memory + type: object + required: + - image + - resource + type: object + observer: + properties: + image: + type: string + resource: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - memory + type: object + storage: + properties: + dataStorage: + properties: + size: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + type: string + required: + - size + type: object + logStorage: + properties: + size: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + 
type: string + required: + - size + type: object + redoLogStorage: + properties: + size: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + storageClass: + type: string + required: + - size + type: object + required: + - dataStorage + - logStorage + - redoLogStorage + type: object + required: + - image + - resource + - storage + type: object + parameters: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + serviceAccount: + default: default + type: string + topology: + items: + properties: + affinity: + description: Affinity is a group of affinity scheduling + rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node matches the corresponding matchExpressions; + the node(s) with the highest sum are the most + preferred. + items: + description: An empty preferred scheduling + term matches all objects with implicit weight + 0 (i.e. it's a no-op). A null preferred + scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. 
If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, + in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to an update), the + system may or may not try to eventually evict + the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: A null or empty node selector + term matches no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector + requirements by node's labels. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector + requirements by node's fields. + items: + description: A node selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: The label key that + the selector applies to. + type: string + operator: + description: Represents a key's + relationship to a set of values. + Valid operators are In, NotIn, + Exists, DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array of string + values. If the operator is + In or NotIn, the values array + must be non-empty. If the + operator is Exists or DoesNotExist, + the values array must be empty. + If the operator is Gt or Lt, + the values array must have + a single element, which will + be interpreted as an integer. + This array is replaced during + a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules + (e.g. co-locate this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the affinity expressions + specified by this field, but it may choose + a node that violates one or more of the expressions. + The node that is most preferred is the one + with the greatest sum of weights, i.e. for + each node that meets all of the scheduling + requirements (resource request, requiredDuringScheduling + affinity expressions, etc.), compute a sum + by iterating through the elements of this + field and adding "weight" to the sum if the + node has pods which matches the corresponding + podAffinityTerm; the node(s) with the highest + sum are the most preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. 
+ properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified + by this field are not met at scheduling time, + the pod will not be scheduled onto the node. + If the affinity requirements specified by + this field cease to be met at some point during + pod execution (e.g. due to a pod label update), + the system may or may not try to eventually + evict the pod from its node. When there are + multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
+ type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling + rules (e.g. avoid putting this pod in the same + node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule + pods to nodes that satisfy the anti-affinity + expressions specified by this field, but it + may choose a node that violates one or more + of the expressions. The node that is most + preferred is the one with the greatest sum + of weights, i.e. for each node that meets + all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity + expressions, etc.), compute a sum by iterating + through the elements of this field and adding + "weight" to the sum if the node has pods which + matches the corresponding podAffinityTerm; + the node(s) with the highest sum are the most + preferred. + items: + description: The weights of all of the matched + WeightedPodAffinityTerm fields are added + per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity + term, associated with the corresponding + weight. + properties: + labelSelector: + description: A label query over a + set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. + type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the + set of namespaces that the term + applies to. The term is applied + to the union of the namespaces selected + by this field and the ones listed + in the namespaces field. null selector + and null or empty namespaces list + means "this pod's namespace". An + empty selector ({}) matches all + namespaces. + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + items: + description: A label selector + requirement is a selector + that contains values, a key, + and an operator that relates + the key and values. + properties: + key: + description: key is the + label key that the selector + applies to. 
+ type: string + operator: + description: operator represents + a key's relationship to + a set of values. Valid + operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an + array of string values. + If the operator is In + or NotIn, the values array + must be non-empty. If + the operator is Exists + or DoesNotExist, the values + array must be empty. This + array is replaced during + a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a + map of {key,value} pairs. A + single {key,value} in the matchLabels + map is equivalent to an element + of matchExpressions, whose key + field is "key", the operator + is "In", and the values array + contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies + a static list of namespace names + that the term applies to. The term + is applied to the union of the namespaces + listed in this field and the ones + selected by namespaceSelector. null + or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where + co-located is defined as running + on a node whose value of the label + with key topologyKey matches that + of any node on which any of the + selected pods is running. Empty + topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching + the corresponding podAffinityTerm, in + the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements + specified by this field are not met at scheduling + time, the pod will not be scheduled onto the + node. If the anti-affinity requirements specified + by this field cease to be met at some point + during pod execution (e.g. due to a pod label + update), the system may or may not try to + eventually evict the pod from its node. When + there are multiple elements, the lists of + nodes corresponding to each podAffinityTerm + are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely + those matching the labelSelector relative + to the given namespace(s)) that this pod + should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is + defined as running on a node whose value + of the label with key matches + that of any node on which a pod of the set + of pods is running + properties: + labelSelector: + description: A label query over a set + of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. 
Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set + of namespaces that the term applies + to. The term is applied to the union + of the namespaces selected by this field + and the ones listed in the namespaces + field. null selector and null or empty + namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: operator represents + a key's relationship to a + set of values. Valid operators + are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array + of string values. If the operator + is In or NotIn, the values + array must be non-empty. If + the operator is Exists or + DoesNotExist, the values array + must be empty. This array + is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. A single {key,value} + in the matchLabels map is equivalent + to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are + ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static + list of namespace names that the term + applies to. The term is applied to the + union of the namespaces listed in this + field and the ones selected by namespaceSelector. + null or empty namespaces list and null + namespaceSelector means "this pod's + namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located + (affinity) or not co-located (anti-affinity) + with the pods matching the labelSelector + in the specified namespaces, where co-located + is defined as running on a node whose + value of the label with key topologyKey + matches that of any node on which any + of the selected pods is running. Empty + topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + type: object + replica: + type: integer + tolerations: + items: + description: The pod this Toleration is attached to + tolerates any taint that matches the triple + using the matching operator . + properties: + effect: + description: Effect indicates the taint effect + to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, + PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; + this combination means to match all values and + all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and + Equal. Defaults to Equal. Exists is equivalent + to wildcard for value, so that a pod can tolerate + all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the + period of time the toleration (which must be + of effect NoExecute, otherwise this field is + ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever + (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value + should be empty, otherwise just a regular string. + type: string + type: object + type: array + zone: + type: string + required: + - replica + - zone + type: object + type: array + userSecrets: + properties: + monitor: + type: string + operator: + type: string + proxyro: + type: string + root: + type: string + required: + - root + type: object + required: + - clusterId + - clusterName + - observer + - topology + - userSecrets + type: object + status: + description: OBClusterStatus defines the observed state of OBCluster + properties: + image: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed + state of cluster Important: Run "make" to regenerate code + after modifying this file' + type: string + obzones: + items: + properties: + status: + type: string + zone: + type: string + required: + - status + - zone + type: object + type: array + operationContext: + properties: + failureRule: + properties: + failureStatus: + type: string + failureStrategy: + type: string + maxRetry: + type: integer + retryCount: + type: integer + required: + - failureStatus + - failureStrategy + type: object + idx: + type: integer + name: + type: string + targetStatus: + type: string + task: + type: string + taskId: + type: string + taskStatus: + type: string + tasks: + items: + type: string + type: array + required: + - idx + - name + - targetStatus + - task + - taskId + - taskStatus + - tasks + type: object + parameters: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + status: + type: string + required: + - image + - obzones + - parameters + - status + type: object + type: object + operationContext: + properties: + failureRule: + properties: + failureStatus: + type: string + failureStrategy: + type: string + maxRetry: + type: integer + retryCount: + type: integer + required: + - failureStatus + - failureStrategy + type: object + idx: + type: integer + name: 
+ type: string + targetStatus: + type: string + task: + type: string + taskId: + type: string + taskStatus: + type: string + tasks: + items: + type: string + type: array + required: + - idx + - name + - targetStatus + - task + - taskId + - taskStatus + - tasks + type: object + status: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + required: + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/oceanbase.oceanbase.com_obclusters.yaml b/config/crd/bases/oceanbase.oceanbase.com_obclusters.yaml index 7d22b12e4..d5327219c 100644 --- a/config/crd/bases/oceanbase.oceanbase.com_obclusters.yaml +++ b/config/crd/bases/oceanbase.oceanbase.com_obclusters.yaml @@ -29,10 +29,6 @@ spec: name: Task priority: 1 type: string - - jsonPath: .status.operationContext.idx - name: TaskIdx - priority: 1 - type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/crd/bases/oceanbase.oceanbase.com_observers.yaml b/config/crd/bases/oceanbase.oceanbase.com_observers.yaml index 088aaa7c7..ef488c9df 100644 --- a/config/crd/bases/oceanbase.oceanbase.com_observers.yaml +++ b/config/crd/bases/oceanbase.oceanbase.com_observers.yaml @@ -34,6 +34,14 @@ spec: name: OBStatus priority: 1 type: string + - jsonPath: .status.operationContext.tasks + name: Tasks + priority: 1 + type: string + - jsonPath: .status.operationContext.task + name: Task + priority: 1 + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/crd/bases/oceanbase.oceanbase.com_obtenantbackuppolicies.yaml b/config/crd/bases/oceanbase.oceanbase.com_obtenantbackuppolicies.yaml index c713eeca3..39eea9543 100644 --- a/config/crd/bases/oceanbase.oceanbase.com_obtenantbackuppolicies.yaml +++ b/config/crd/bases/oceanbase.oceanbase.com_obtenantbackuppolicies.yaml @@ -38,6 +38,14 @@ spec: - jsonPath: .spec.dataBackup.incrementalCrontab name: IncrementalCrontab type: string + - jsonPath: .status.operationContext.tasks + name: Tasks + priority: 1 + type: string + - jsonPath: .status.operationContext.task + name: Task + priority: 1 + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/crd/bases/oceanbase.oceanbase.com_obzones.yaml b/config/crd/bases/oceanbase.oceanbase.com_obzones.yaml index a2b6913a9..85f46068e 100644 --- a/config/crd/bases/oceanbase.oceanbase.com_obzones.yaml +++ b/config/crd/bases/oceanbase.oceanbase.com_obzones.yaml @@ -27,6 +27,14 @@ spec: - jsonPath: .spec.topology.zone name: ZoneName type: string + - jsonPath: .status.operationContext.tasks + name: Tasks + priority: 1 + type: string + - jsonPath: .status.operationContext.task + name: Task + priority: 1 + type: string name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index edb1ce99e..88e2ba171 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -12,6 +12,7 @@ resources: - bases/oceanbase.oceanbase.com_obtenantbackuppolicies.yaml - bases/oceanbase.oceanbase.com_obtenantoperations.yaml - bases/oceanbase.oceanbase.com_obresourcerescues.yaml +- bases/oceanbase.oceanbase.com_obclusteroperations.yaml #+kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -27,6 +28,7 @@ patchesStrategicMerge: - patches/webhook_in_obtenantbackuppolicies.yaml - patches/webhook_in_obtenantoperations.yaml - patches/webhook_in_obresourcerescues.yaml +- 
patches/webhook_in_obclusteroperations.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -41,6 +43,7 @@ patchesStrategicMerge: - patches/cainjection_in_obtenantbackuppolicies.yaml - patches/cainjection_in_obtenantoperations.yaml - patches/cainjection_in_obresourcerescues.yaml +- patches/cainjection_in_obclusteroperations.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_obclusteroperations.yaml b/config/crd/patches/cainjection_in_obclusteroperations.yaml new file mode 100644 index 000000000..65417523d --- /dev/null +++ b/config/crd/patches/cainjection_in_obclusteroperations.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: obclusteroperations.oceanbase.oceanbase.com diff --git a/config/crd/patches/webhook_in_obclusteroperations.yaml b/config/crd/patches/webhook_in_obclusteroperations.yaml new file mode 100644 index 000000000..eb94f1036 --- /dev/null +++ b/config/crd/patches/webhook_in_obclusteroperations.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: obclusteroperations.oceanbase.oceanbase.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/rbac/obclusteroperation_editor_role.yaml b/config/rbac/obclusteroperation_editor_role.yaml new file mode 100644 index 000000000..9679f9277 --- /dev/null +++ b/config/rbac/obclusteroperation_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit obclusteroperations. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: obclusteroperation-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ob-operator + app.kubernetes.io/part-of: ob-operator + app.kubernetes.io/managed-by: kustomize + name: obclusteroperation-editor-role +rules: +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations/status + verbs: + - get diff --git a/config/rbac/obclusteroperation_viewer_role.yaml b/config/rbac/obclusteroperation_viewer_role.yaml new file mode 100644 index 000000000..50d49cc15 --- /dev/null +++ b/config/rbac/obclusteroperation_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view obclusteroperations. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: obclusteroperation-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ob-operator + app.kubernetes.io/part-of: ob-operator + app.kubernetes.io/managed-by: kustomize + name: obclusteroperation-viewer-role +rules: +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations + verbs: + - get + - list + - watch +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 6a41e39b2..70b5dd4d5 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -177,6 +177,32 @@ rules: - get - patch - update +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations/finalizers + verbs: + - update +- apiGroups: + - oceanbase.oceanbase.com + resources: + - obclusteroperations/status + verbs: + - get + - patch + - update - apiGroups: - oceanbase.oceanbase.com resources: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 63b07653a..419043143 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -24,6 +24,26 @@ webhooks: resources: - obclusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-oceanbase-oceanbase-com-v1alpha1-obclusteroperation + failurePolicy: Fail + name: mobclusteroperation.kb.io + rules: + - apiGroups: + - oceanbase.oceanbase.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - obclusteroperations + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -130,6 +150,26 @@ webhooks: resources: - obclusters sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-oceanbase-oceanbase-com-v1alpha1-obclusteroperation + failurePolicy: Fail + name: vobclusteroperation.kb.io + rules: + - apiGroups: + - oceanbase.oceanbase.com + apiVersions: + - v1alpha1 + operations: + - CREATE + - UPDATE + resources: + - obclusteroperations + sideEffects: None - admissionReviewVersions: - v1 clientConfig: diff --git a/cmd/generator/task/task-register.go b/internal/cmds/generator/task/task_register.go similarity index 61% rename from cmd/generator/task/task-register.go rename to internal/cmds/generator/task/task_register.go index 9a0096a06..968fb552e 100644 --- a/cmd/generator/task/task-register.go +++ b/internal/cmds/generator/task/task_register.go @@ -19,6 +19,7 @@ import ( "go/token" "log" "os" + "regexp" "strings" "text/template" ) @@ -33,8 +34,39 @@ func init() { } ` +const taskNameGenTemplate = `// Code generated by go generate; DO NOT EDIT. 
+package {{.PackageName}} + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( +{{- range .TasksWithName }} + t{{.Task}} ttypes.TaskName = "{{.Name}}" +{{- end }} +) +` + type Task string +type TaskWithName struct { + Task Task + Name string +} + +func camelCaseToSpaceStyle(s string) string { + // var result string + // for i, r := range s { + // if i > 0 && unicode.IsUpper(r) { + // result += " " + // } + // result += strings.ToLower(string(r)) + // } + // return result + re := regexp.MustCompile(`(?m)([a-z])([A-Z])`) + spaceStr := strings.ToLower(re.ReplaceAllString(s, "${1} ${2}")) + return spaceStr +} + func main() { if len(os.Args) != 2 { log.Fatalf("Usage: %s ", os.Args[0]) @@ -48,6 +80,7 @@ func main() { } taskFuncs := []Task{} + tasksWithName := []TaskWithName{} ast.Inspect(node, func(n ast.Node) bool { fn, ok := n.(*ast.FuncDecl) if !ok { @@ -57,6 +90,10 @@ func main() { if len(fn.Type.Params.List) == 1 && len(fn.Type.Results.List) == 1 { if strings.HasSuffix(exprToString(fn.Type.Results.List[0].Type), "TaskError") { taskFuncs = append(taskFuncs, Task(fn.Name.Name)) + tasksWithName = append(tasksWithName, TaskWithName{ + Task: Task(fn.Name.Name), + Name: camelCaseToSpaceStyle(fn.Name.Name), + }) } } @@ -83,6 +120,32 @@ func main() { PackageName: node.Name.Name, Tasks: taskFuncs, }) + if err != nil { + log.Printf("Failed to execute template: %v", err) + return + } + + nameTmpl, err := template.New("taskName").Parse(taskNameGenTemplate) + if err != nil { + log.Printf("Failed to parse template: %v", err) + return + } + + outputFile2 := sourceFile[:len(sourceFile)-3] + "name_gen.go" + f2, err := os.Create(outputFile2) + if err != nil { + log.Printf("Failed to create output file: %v", err) + return + } + defer f2.Close() + + err = nameTmpl.Execute(f2, struct { + PackageName string + TasksWithName []TaskWithName + }{ + PackageName: node.Name.Name, + TasksWithName: tasksWithName, + }) if err != nil { log.Printf("Failed to execute template: %v", err) } diff --git a/internal/cmds/generator/task/task_register_test.go b/internal/cmds/generator/task/task_register_test.go new file mode 100644 index 000000000..b1f457df7 --- /dev/null +++ b/internal/cmds/generator/task/task_register_test.go @@ -0,0 +1,42 @@ +/* +Copyright (c) 2024 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package main + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/** +"ThisIsASampleCamelCaseString", +"IPAddress", +"ConvertIPAndHTML", +"SimpleCase", +"PDFLoader", +"ASimpleXMLParser", +*/ + +var _ = Describe("Test", func() { + It("Test", func() { + Expect(camelCaseToSpaceStyle("ChangeTenantRootPasswordFlow")).To(Equal("change tenant root password flow")) + Expect(camelCaseToSpaceStyle("ModifyClusterSpec")).To(Equal("modify cluster spec")) + Expect(camelCaseToSpaceStyle("RestartServers")).To(Equal("restart servers")) + Expect(camelCaseToSpaceStyle("CamelCaseStringIPExample")).To(Equal("camel case string ipexample")) + Expect(camelCaseToSpaceStyle("ThisIsASampleCamelCaseString")).To(Equal("this is asample camel case string")) + Expect(camelCaseToSpaceStyle("IPAddress")).To(Equal("ipaddress")) + Expect(camelCaseToSpaceStyle("ConvertIPAndHTML")).To(Equal("convert ipand html")) + Expect(camelCaseToSpaceStyle("SimpleCase")).To(Equal("simple case")) + Expect(camelCaseToSpaceStyle("PDFLoader")).To(Equal("pdfloader")) + Expect(camelCaseToSpaceStyle("ASimpleXMLParser")).To(Equal("asimple xmlparser")) + }) +}) diff --git a/internal/resource/obtenantbackup/names.go b/internal/cmds/generator/task/task_suite_test.go similarity index 64% rename from internal/resource/obtenantbackup/names.go rename to internal/cmds/generator/task/task_suite_test.go index b96c435fe..721cf01be 100644 --- a/internal/resource/obtenantbackup/names.go +++ b/internal/cmds/generator/task/task_suite_test.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2023 OceanBase +Copyright (c) 2024 OceanBase ob-operator is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: @@ -10,16 +10,16 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ -package obtenantbackup +package main import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) + "testing" -const ( - fCreateBackupJobInOB ttypes.FlowName = "create backup job in db" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" ) -const ( - tCreateBackupJobInOB ttypes.TaskName = "create backup job in db" -) +func TestTask(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Task Suite") +} diff --git a/internal/const/status/obcluster/obcluster_status.go b/internal/const/status/obcluster/obcluster_status.go index 143eb53f7..6f4e8a2bd 100644 --- a/internal/const/status/obcluster/obcluster_status.go +++ b/internal/const/status/obcluster/obcluster_status.go @@ -23,9 +23,9 @@ const ( ModifyOBParameter = "modify parameter" Bootstrapped = "bootstrapped" FinalizerFinished = "finalizer finished" - ScaleUp = "scale up" + ScaleVertically = "scale vertically" ExpandPVC = "expand pvc" Failed = "failed" - MountBackupVolume = "mount backup volume" + ModifyServerTemplate = "modify server template" RollingUpdateOBServers = "rolling update observers" ) diff --git a/internal/const/status/observer/observer_status.go b/internal/const/status/observer/observer_status.go index 72398dbb1..ab13e605b 100644 --- a/internal/const/status/observer/observer_status.go +++ b/internal/const/status/observer/observer_status.go @@ -13,17 +13,17 @@ See the Mulan PSL v2 for more details. 
package observer const ( - New = "new" - Recover = "recover observer" - Unrecoverable = "observer unrecoverable" - Running = "running" - Upgrade = "upgrade" - AddServer = "Add server" - Annotate = "annotate" - BootstrapReady = "bootstrap ready" - Deleting = "deleting" - ScaleUp = "scale up" - ExpandPVC = "expand pvc" - FinalizerFinished = "finalizer finished" - MountBackupVolume = "mount backup volume" + New = "new" + Recover = "recover observer" + Unrecoverable = "observer unrecoverable" + Running = "running" + Upgrade = "upgrade" + AddServer = "Add server" + Annotate = "annotate" + BootstrapReady = "bootstrap ready" + Deleting = "deleting" + ScaleVertically = "scale vertically" + ExpandPVC = "expand pvc" + FinalizerFinished = "finalizer finished" + ModifyingPodTemplate = "modifying pod template" ) diff --git a/internal/const/status/obzone/obzone_status.go b/internal/const/status/obzone/obzone_status.go index 81dd565be..de552efe8 100644 --- a/internal/const/status/obzone/obzone_status.go +++ b/internal/const/status/obzone/obzone_status.go @@ -23,8 +23,8 @@ const ( Upgrade = "upgrade" BootstrapReady = "bootstrap ready" FinalizerFinished = "finalizer finished" - ScaleUp = "scale up" + ScaleVertically = "scale vertically" ExpandPVC = "expand pvc" - MountBackupVolume = "mount backup volume" + ModifyServerTemplate = "modify server template" RollingUpdateServers = "rolling update servers" ) diff --git a/internal/controller/config/const.go b/internal/controller/config/const.go index d77a0fb32..b5d1dfe4a 100644 --- a/internal/controller/config/const.go +++ b/internal/controller/config/const.go @@ -23,4 +23,5 @@ const ( OBTenantBackupPolicyControllerName = "obtenantbackuppolicy-controller" OBTenantOperationControllerName = "obtenantoperation-controller" OBResourceRescueControllerName = "obresourcerescue-controller" + OBClusterOperationControllerName = "obclusteroperation-controller" ) diff --git a/internal/controller/obclusteroperation_controller.go b/internal/controller/obclusteroperation_controller.go new file mode 100644 index 000000000..baf42c8a2 --- /dev/null +++ b/internal/controller/obclusteroperation_controller.go @@ -0,0 +1,92 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controller + +import ( + "context" + + kubeerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + apiconsts "github.com/oceanbase/ob-operator/api/constants" + v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + res "github.com/oceanbase/ob-operator/internal/resource/obclusteroperation" + "github.com/oceanbase/ob-operator/internal/telemetry" + "github.com/oceanbase/ob-operator/pkg/coordinator" +) + +// OBClusterOperationReconciler reconciles a OBClusterOperation object +type OBClusterOperationReconciler struct { + client.Client + Scheme *runtime.Scheme + Recorder record.EventRecorder +} + +//+kubebuilder:rbac:groups=oceanbase.oceanbase.com,resources=obclusteroperations,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=oceanbase.oceanbase.com,resources=obclusteroperations/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=oceanbase.oceanbase.com,resources=obclusteroperations/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. + +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.3/pkg/reconcile +func (r *OBClusterOperationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + op := &v1alpha1.OBClusterOperation{} + err := r.Client.Get(ctx, req.NamespacedName, op) + if err != nil { + if kubeerrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to get cluster operation") + return ctrl.Result{}, err + } + + switch op.Status.Status { + case apiconsts.ClusterOpStatusSucceeded, apiconsts.ClusterOpStatusFailed: + if op.ShouldBeCleaned() { + if err := r.Client.Delete(ctx, op); err != nil { + logger.Error(err, "Failed to delete stale cluster operation") + return ctrl.Result{}, err + } + } + } + + // create cluster operation manager + clusterOpManager := &res.OBClusterOperationManager{ + Ctx: ctx, + Resource: op, + Client: r.Client, + Logger: &logger, + Recorder: telemetry.NewRecorder(ctx, r.Recorder), + } + + cood := coordinator.NewCoordinator(clusterOpManager, &logger) + return cood.Coordinate() +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OBClusterOperationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.OBClusterOperation{}). + Complete(r) +} diff --git a/internal/resource/obcluster/names.go b/internal/resource/obcluster/names.go deleted file mode 100644 index 8c5200c08..000000000 --- a/internal/resource/obcluster/names.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
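The main.go hunk that registers the new controller is not included in this excerpt; a minimal sketch of the usual controller-runtime wiring, assuming the standard kubebuilder setup plus the OBClusterOperationControllerName constant added in internal/controller/config/const.go (imports: ctrl "sigs.k8s.io/controller-runtime" and the project's controller and config packages):

// Illustrative sketch only, not taken from this diff.
func setupOBClusterOperationController(mgr ctrl.Manager) error {
    return (&controller.OBClusterOperationReconciler{
        Client:   mgr.GetClient(),
        Scheme:   mgr.GetScheme(),
        Recorder: mgr.GetEventRecorderFor(config.OBClusterOperationControllerName),
    }).SetupWithManager(mgr)
}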
-*/ - -package obcluster - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -// obcluster flows -const ( - fMigrateOBClusterFromExisting ttypes.FlowName = "migrate obcluster from existing" - fBootstrapOBCluster ttypes.FlowName = "bootstrap obcluster" - fMaintainOBClusterAfterBootstrap ttypes.FlowName = "maintain obcluster after bootstrap" - fAddOBZone ttypes.FlowName = "add obzone" - fDeleteOBZone ttypes.FlowName = "delete obzone" - fModifyOBZoneReplica ttypes.FlowName = "modify obzone replica" - fUpgradeOBCluster ttypes.FlowName = "upgrade ob cluster" - fMaintainOBParameter ttypes.FlowName = "maintain ob parameter" - fDeleteOBClusterFinalizer ttypes.FlowName = "delete obcluster finalizer" - fScaleUpOBZones ttypes.FlowName = "scale up obzones" - fExpandPVC ttypes.FlowName = "expand pvc for obcluster" - fMountBackupVolume ttypes.FlowName = "mount backup volume for obcluster" - fRollingUpdateOBServers ttypes.FlowName = "rolling update observers" -) - -// obcluster tasks -const ( - tCheckMigration ttypes.TaskName = "check before migration" - tCheckImageReady ttypes.TaskName = "check image ready" - tCheckClusterMode ttypes.TaskName = "check cluster mode" - tCheckAndCreateUserSecrets ttypes.TaskName = "check and create user secrets" - tCreateOBZone ttypes.TaskName = "create obzone" - tDeleteOBZone ttypes.TaskName = "delete obzone" - tWaitOBZoneBootstrapReady ttypes.TaskName = "wait obzone bootstrap ready" - tBootstrap ttypes.TaskName = "bootstrap" - tCreateUsers ttypes.TaskName = "create users" - tUpdateParameter ttypes.TaskName = "update parameter" - tModifyOBZoneReplica ttypes.TaskName = "modify obzone replica" - tModifySysTenantReplica ttypes.TaskName = "modify sys tenant replica" - tWaitOBZoneRunning ttypes.TaskName = "wait obzone running" - tWaitOBZoneTopologyMatch ttypes.TaskName = "wait obzone topology match" - tWaitOBZoneDeleted ttypes.TaskName = "wait obzone deleted" - tCreateOBClusterService ttypes.TaskName = "create obcluster service" - tMaintainOBParameter ttypes.TaskName = "maintain obparameter" - // for upgrade - tValidateUpgradeInfo ttypes.TaskName = "validate upgrade info" - tUpgradeCheck ttypes.TaskName = "upgrade check" - tBackupEssentialParameters ttypes.TaskName = "backup essential parameters" - tBeginUpgrade ttypes.TaskName = "execute upgrade pre script" - tRollingUpgradeByZone ttypes.TaskName = "rolling upgrade by zone" - tFinishUpgrade ttypes.TaskName = "execute upgrade post script" - tRestoreEssentialParameters ttypes.TaskName = "restore essential parameters" - tCreateServiceForMonitor ttypes.TaskName = "create service for monitor" - tScaleUpOBZones ttypes.TaskName = "scale up obzones" - tExpandPVC ttypes.TaskName = "expand pvc" - tMountBackupVolume ttypes.TaskName = "mount backup volume" - tCheckEnvironment ttypes.TaskName = "check environment" - tAdjustParameters ttypes.TaskName = "adjust parameters" - tAnnotateOBCluster ttypes.TaskName = "annotate obcluster" - tRollingUpdateOBZones ttypes.TaskName = "rolling update observers" -) diff --git a/internal/resource/obcluster/obcluster_flow.go b/internal/resource/obcluster/obcluster_flow.go index 0e4361988..dc02167bc 100644 --- a/internal/resource/obcluster/obcluster_flow.go +++ b/internal/resource/obcluster/obcluster_flow.go @@ -23,7 +23,7 @@ import ( func genMigrateOBClusterFromExistingFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMigrateOBClusterFromExisting, + Name: "migrate obcluster from existing", Tasks: 
[]tasktypes.TaskName{ tCheckMigration, tCheckImageReady, @@ -49,7 +49,7 @@ func genMigrateOBClusterFromExistingFlow(_ *OBClusterManager) *tasktypes.TaskFlo func genBootstrapOBClusterFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fBootstrapOBCluster, + Name: "bootstrap obcluster", Tasks: []tasktypes.TaskName{ tCheckImageReady, tCheckEnvironment, @@ -70,7 +70,7 @@ func genBootstrapOBClusterFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genMaintainOBClusterAfterBootstrapFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainOBClusterAfterBootstrap, + Name: "maintain obcluster after bootstrap", Tasks: []tasktypes.TaskName{ tWaitOBZoneRunning, tCreateUsers, @@ -87,7 +87,7 @@ func genMaintainOBClusterAfterBootstrapFlow(_ *OBClusterManager) *tasktypes.Task func genAddOBZoneFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fAddOBZone, + Name: "add obzone", Tasks: []tasktypes.TaskName{ tCreateOBZone, tWaitOBZoneRunning, @@ -101,7 +101,7 @@ func genAddOBZoneFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genDeleteOBZoneFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fDeleteOBZone, + Name: "delete obzone", Tasks: []tasktypes.TaskName{ tModifySysTenantReplica, tDeleteOBZone, @@ -115,7 +115,7 @@ func genDeleteOBZoneFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genModifyOBZoneReplicaFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fModifyOBZoneReplica, + Name: "modify obzone replica", Tasks: []tasktypes.TaskName{ tModifyOBZoneReplica, tWaitOBZoneTopologyMatch, @@ -129,7 +129,7 @@ func genModifyOBZoneReplicaFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genMaintainOBParameterFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainOBParameter, + Name: "maintain obparameter", Tasks: []tasktypes.TaskName{ tMaintainOBParameter, }, @@ -141,7 +141,7 @@ func genMaintainOBParameterFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genUpgradeOBClusterFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fUpgradeOBCluster, + Name: "upgrade obcluster", Tasks: []tasktypes.TaskName{ tValidateUpgradeInfo, tBackupEssentialParameters, @@ -159,13 +159,13 @@ func genUpgradeOBClusterFlow(_ *OBClusterManager) *tasktypes.TaskFlow { } } -func genScaleUpOBZonesFlow(_ *OBClusterManager) *tasktypes.TaskFlow { +func genScaleOBZonesVerticallyFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fScaleUpOBZones, + Name: "scale obzones vertically", Tasks: []tasktypes.TaskName{ tAdjustParameters, - tScaleUpOBZones, + tScaleOBZonesVertically, }, TargetStatus: clusterstatus.Running, }, @@ -175,7 +175,7 @@ func genScaleUpOBZonesFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genExpandPVCFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fExpandPVC, + Name: "expand pvc", Tasks: []tasktypes.TaskName{ tExpandPVC, tWaitOBZoneRunning, @@ -185,12 +185,12 @@ func genExpandPVCFlow(_ 
*OBClusterManager) *tasktypes.TaskFlow { } } -func genMountBackupVolumeFlow(_ *OBClusterManager) *tasktypes.TaskFlow { +func genModifyServerTemplateFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMountBackupVolume, + Name: "modify server template", Tasks: []tasktypes.TaskName{ - tMountBackupVolume, + tModifyServerTemplate, tWaitOBZoneRunning, }, TargetStatus: clusterstatus.Running, @@ -201,7 +201,7 @@ func genMountBackupVolumeFlow(_ *OBClusterManager) *tasktypes.TaskFlow { func genRollingUpdateOBZonesFlow(_ *OBClusterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRollingUpdateOBServers, + Name: "rolling update observers", Tasks: []tasktypes.TaskName{ tRollingUpdateOBZones, }, diff --git a/internal/resource/obcluster/obcluster_manager.go b/internal/resource/obcluster/obcluster_manager.go index fb3ff946b..817960e4d 100644 --- a/internal/resource/obcluster/obcluster_manager.go +++ b/internal/resource/obcluster/obcluster_manager.go @@ -103,12 +103,12 @@ func (m *OBClusterManager) GetTaskFlow() (*tasktypes.TaskFlow, error) { taskFlow = genUpgradeOBClusterFlow(m) case clusterstatus.ModifyOBParameter: taskFlow = genMaintainOBParameterFlow(m) - case clusterstatus.ScaleUp: - taskFlow = genScaleUpOBZonesFlow(m) + case clusterstatus.ScaleVertically: + taskFlow = genScaleOBZonesVerticallyFlow(m) case clusterstatus.ExpandPVC: taskFlow = genExpandPVCFlow(m) - case clusterstatus.MountBackupVolume: - taskFlow = genMountBackupVolumeFlow(m) + case clusterstatus.ModifyServerTemplate: + taskFlow = genModifyServerTemplateFlow(m) case clusterstatus.RollingUpdateOBServers: taskFlow = genRollingUpdateOBZonesFlow(m) default: @@ -185,7 +185,7 @@ func (m *OBClusterManager) UpdateStatus() error { outer: for _, obzone := range obzoneList.Items { if m.OBCluster.SupportStaticIP() && m.checkIfCalcResourceChange(&obzone) { - m.OBCluster.Status.Status = clusterstatus.ScaleUp + m.OBCluster.Status.Status = clusterstatus.ScaleVertically break outer } if m.checkIfStorageClassChange(&obzone) { @@ -196,8 +196,8 @@ func (m *OBClusterManager) UpdateStatus() error { m.OBCluster.Status.Status = clusterstatus.ExpandPVC break outer } - if m.checkIfBackupVolumeAdded(&obzone) { - m.OBCluster.Status.Status = clusterstatus.MountBackupVolume + if m.checkIfBackupVolumeMutated(&obzone) || m.checkIfMonitorMutated(&obzone) { + m.OBCluster.Status.Status = clusterstatus.ModifyServerTemplate break outer } for _, zone := range m.OBCluster.Spec.Topology { diff --git a/internal/resource/obcluster/obcluster_task.go b/internal/resource/obcluster/obcluster_task.go index 879d3d639..b3bd12403 100644 --- a/internal/resource/obcluster/obcluster_task.go +++ b/internal/resource/obcluster/obcluster_task.go @@ -9,7 +9,7 @@ EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. 
*/ -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE package obcluster @@ -973,16 +973,16 @@ func CheckMigration(m *OBClusterManager) tasktypes.TaskError { return nil } -func ScaleUpOBZones(m *OBClusterManager) tasktypes.TaskError { - return m.rollingUpdateZones(m.changeZonesWhenScaling, zonestatus.ScaleUp, zonestatus.Running, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() +func ScaleOBZonesVertically(m *OBClusterManager) tasktypes.TaskError { + return m.rollingUpdateZones(m.changeZonesWhenScaling, zonestatus.ScaleVertically, zonestatus.Running, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func ExpandPVC(m *OBClusterManager) tasktypes.TaskError { return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenExpandingPVC, zonestatus.ExpandPVC, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } -func MountBackupVolume(m *OBClusterManager) tasktypes.TaskError { - return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenMountingBackupVolume, zonestatus.MountBackupVolume, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() +func ModifyServerTemplate(m *OBClusterManager) tasktypes.TaskError { + return m.rollingUpdateZones(m.changeZonesWhenModifyingServerTemplate, zonestatus.ModifyServerTemplate, zonestatus.Running, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func WaitOBZoneBootstrapReady(m *OBClusterManager) tasktypes.TaskError { diff --git a/internal/resource/obcluster/obcluster_task_gen.go b/internal/resource/obcluster/obcluster_task_gen.go index f87b8b26b..706f73242 100644 --- a/internal/resource/obcluster/obcluster_task_gen.go +++ b/internal/resource/obcluster/obcluster_task_gen.go @@ -24,9 +24,9 @@ func init() { taskMap.Register(tCheckImageReady, CheckImageReady) taskMap.Register(tCheckClusterMode, CheckClusterMode) taskMap.Register(tCheckMigration, CheckMigration) - taskMap.Register(tScaleUpOBZones, ScaleUpOBZones) + taskMap.Register(tScaleOBZonesVertically, ScaleOBZonesVertically) taskMap.Register(tExpandPVC, ExpandPVC) - taskMap.Register(tMountBackupVolume, MountBackupVolume) + taskMap.Register(tModifyServerTemplate, ModifyServerTemplate) taskMap.Register(tWaitOBZoneBootstrapReady, WaitOBZoneBootstrapReady) taskMap.Register(tWaitOBZoneRunning, WaitOBZoneRunning) taskMap.Register(tRollingUpdateOBZones, RollingUpdateOBZones) diff --git a/internal/resource/obcluster/obcluster_taskname_gen.go b/internal/resource/obcluster/obcluster_taskname_gen.go new file mode 100644 index 000000000..13f067c6b --- /dev/null +++ b/internal/resource/obcluster/obcluster_taskname_gen.go @@ -0,0 +1,38 @@ +// Code generated by go generate; DO NOT EDIT. 
+package obcluster + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tWaitOBZoneTopologyMatch ttypes.TaskName = "wait obzone topology match" + tWaitOBZoneDeleted ttypes.TaskName = "wait obzone deleted" + tModifyOBZoneReplica ttypes.TaskName = "modify obzone replica" + tDeleteOBZone ttypes.TaskName = "delete obzone" + tCreateOBZone ttypes.TaskName = "create obzone" + tBootstrap ttypes.TaskName = "bootstrap" + tCreateUsers ttypes.TaskName = "create users" + tMaintainOBParameter ttypes.TaskName = "maintain obparameter" + tValidateUpgradeInfo ttypes.TaskName = "validate upgrade info" + tUpgradeCheck ttypes.TaskName = "upgrade check" + tBackupEssentialParameters ttypes.TaskName = "backup essential parameters" + tBeginUpgrade ttypes.TaskName = "begin upgrade" + tRollingUpgradeByZone ttypes.TaskName = "rolling upgrade by zone" + tFinishUpgrade ttypes.TaskName = "finish upgrade" + tModifySysTenantReplica ttypes.TaskName = "modify sys tenant replica" + tCreateServiceForMonitor ttypes.TaskName = "create service for monitor" + tRestoreEssentialParameters ttypes.TaskName = "restore essential parameters" + tCheckAndCreateUserSecrets ttypes.TaskName = "check and create user secrets" + tCreateOBClusterService ttypes.TaskName = "create obcluster service" + tCheckImageReady ttypes.TaskName = "check image ready" + tCheckClusterMode ttypes.TaskName = "check cluster mode" + tCheckMigration ttypes.TaskName = "check migration" + tScaleOBZonesVertically ttypes.TaskName = "scale obzones vertically" + tExpandPVC ttypes.TaskName = "expand pvc" + tModifyServerTemplate ttypes.TaskName = "modify server template" + tWaitOBZoneBootstrapReady ttypes.TaskName = "wait obzone bootstrap ready" + tWaitOBZoneRunning ttypes.TaskName = "wait obzone running" + tRollingUpdateOBZones ttypes.TaskName = "rolling update obzones" + tCheckEnvironment ttypes.TaskName = "check environment" + tAnnotateOBCluster ttypes.TaskName = "annotate obcluster" + tAdjustParameters ttypes.TaskName = "adjust parameters" +) diff --git a/internal/resource/obcluster/utils.go b/internal/resource/obcluster/utils.go index edef39b43..69f15b89b 100644 --- a/internal/resource/obcluster/utils.go +++ b/internal/resource/obcluster/utils.go @@ -54,8 +54,12 @@ func (m *OBClusterManager) checkIfCalcResourceChange(obzone *v1alpha1.OBZone) bo obzone.Spec.OBServerTemplate.Resource.Memory.Cmp(m.OBCluster.Spec.OBServerTemplate.Resource.Memory) != 0 } -func (m *OBClusterManager) checkIfBackupVolumeAdded(obzone *v1alpha1.OBZone) bool { - return obzone.Spec.BackupVolume == nil && m.OBCluster.Spec.BackupVolume != nil +func (m *OBClusterManager) checkIfBackupVolumeMutated(obzone *v1alpha1.OBZone) bool { + return (obzone.Spec.BackupVolume == nil) != (m.OBCluster.Spec.BackupVolume == nil) +} + +func (m *OBClusterManager) checkIfMonitorMutated(obzone *v1alpha1.OBZone) bool { + return (obzone.Spec.MonitorTemplate == nil) != (m.OBCluster.Spec.MonitorTemplate == nil) } func (m *OBClusterManager) retryUpdateStatus() error { @@ -186,8 +190,9 @@ func (m *OBClusterManager) changeZonesWhenUpdatingOBServers(obzone *v1alpha1.OBZ obzone.Spec.OBServerTemplate = m.OBCluster.Spec.OBServerTemplate } -func (m *OBClusterManager) changeZonesWhenMountingBackupVolume(obzone *v1alpha1.OBZone) { +func (m *OBClusterManager) changeZonesWhenModifyingServerTemplate(obzone *v1alpha1.OBZone) { obzone.Spec.BackupVolume = m.OBCluster.Spec.BackupVolume + obzone.Spec.MonitorTemplate = m.OBCluster.Spec.MonitorTemplate } func (m *OBClusterManager) modifyOBZonesAndCheckStatus(changer 
obzoneChanger, status string, timeoutSeconds int) tasktypes.TaskFunc { diff --git a/internal/resource/obclusteroperation/obclusteroperation_flow.go b/internal/resource/obclusteroperation/obclusteroperation_flow.go new file mode 100644 index 000000000..ccf426c79 --- /dev/null +++ b/internal/resource/obclusteroperation/obclusteroperation_flow.go @@ -0,0 +1,49 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package obclusteroperation + +import ( + "github.com/oceanbase/ob-operator/api/constants" + tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" +) + +func genModifySpecAndWatchFlow(_ *OBClusterOperationManager) *tasktypes.TaskFlow { + return &tasktypes.TaskFlow{ + OperationContext: &tasktypes.OperationContext{ + Name: "modify spec and watch", + Tasks: []tasktypes.TaskName{ + tModifyClusterSpec, + tWaitForClusterReturnRunning, + }, + TargetStatus: string(constants.ClusterOpStatusSucceeded), + OnFailure: tasktypes.FailureRule{ + NextTryStatus: string(constants.ClusterOpStatusFailed), + }, + }, + } +} + +func genRestartOBServersOnlyFlow(_ *OBClusterOperationManager) *tasktypes.TaskFlow { + return &tasktypes.TaskFlow{ + OperationContext: &tasktypes.OperationContext{ + Name: "restart observers only", + Tasks: []tasktypes.TaskName{ + tRestartOBServers, + }, + TargetStatus: string(constants.ClusterOpStatusSucceeded), + OnFailure: tasktypes.FailureRule{ + NextTryStatus: string(constants.ClusterOpStatusFailed), + }, + }, + } +} diff --git a/internal/resource/obclusteroperation/obclusteroperation_manager.go b/internal/resource/obclusteroperation/obclusteroperation_manager.go new file mode 100644 index 000000000..40f07c570 --- /dev/null +++ b/internal/resource/obclusteroperation/obclusteroperation_manager.go @@ -0,0 +1,180 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
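The checkIfBackupVolumeMutated and checkIfMonitorMutated helpers added to obcluster/utils.go above both use the same nil-comparison idiom; a generic sketch of it (helper name invented here, not part of the patch):

// Reports a change whenever exactly one side has the optional section set,
// so it covers both adding and removing the section, unlike the old
// checkIfBackupVolumeAdded, which only detected addition.
func sectionToggled[T any](zoneSide, clusterSide *T) bool {
    return (zoneSide == nil) != (clusterSide == nil)
}

Called as sectionToggled(obzone.Spec.BackupVolume, m.OBCluster.Spec.BackupVolume), it behaves like checkIfBackupVolumeMutated above.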
+*/ + +package obclusteroperation + +import ( + "context" + "strings" + + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/oceanbase/ob-operator/api/constants" + apitypes "github.com/oceanbase/ob-operator/api/types" + v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" + "github.com/oceanbase/ob-operator/internal/telemetry" + opresource "github.com/oceanbase/ob-operator/pkg/coordinator" + taskstatus "github.com/oceanbase/ob-operator/pkg/task/const/status" + "github.com/oceanbase/ob-operator/pkg/task/const/strategy" + tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" +) + +var _ opresource.ResourceManager = &OBClusterOperationManager{} + +type OBClusterOperationManager struct { + Ctx context.Context + Resource *v1alpha1.OBClusterOperation + Client client.Client + Recorder telemetry.Recorder + Logger *logr.Logger +} + +func (m *OBClusterOperationManager) GetMeta() metav1.Object { + return m.Resource.GetObjectMeta() +} + +func (m *OBClusterOperationManager) GetStatus() string { + return string(m.Resource.Status.Status) +} + +func (m *OBClusterOperationManager) CheckAndUpdateFinalizers() error { + return nil +} + +func (m *OBClusterOperationManager) InitStatus() { + m.Resource.Status.Status = constants.ClusterOpStatusRunning + obcluster := &v1alpha1.OBCluster{} + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: m.Resource.Spec.OBCluster, + }, obcluster) + if err != nil { + m.Logger.V(oceanbaseconst.LogLevelDebug).WithValues("err", err).Info("Failed to find obcluster") + return + } + m.Resource.Status.ClusterSnapshot = &v1alpha1.OBClusterSnapshot{ + Spec: &obcluster.Spec, + Status: &obcluster.Status, + } +} + +func (m *OBClusterOperationManager) SetOperationContext(c *tasktypes.OperationContext) { + m.Resource.Status.OperationContext = c +} + +func (m *OBClusterOperationManager) ClearTaskInfo() { + m.Resource.Status.Status = constants.ClusterOpStatusRunning + m.Resource.Status.OperationContext = nil +} + +func (m *OBClusterOperationManager) HandleFailure() { + if m.Resource.DeletionTimestamp != nil { + m.Resource.Status.OperationContext = nil + } else { + operationContext := m.Resource.Status.OperationContext + failureRule := operationContext.OnFailure + switch failureRule.Strategy { + case strategy.StartOver: + if m.Resource.Status.Status != apitypes.ClusterOperationStatus(failureRule.NextTryStatus) { + m.Resource.Status.Status = apitypes.ClusterOperationStatus(failureRule.NextTryStatus) + m.Resource.Status.OperationContext = nil + } else { + m.Resource.Status.OperationContext.Idx = 0 + m.Resource.Status.OperationContext.TaskStatus = "" + m.Resource.Status.OperationContext.TaskId = "" + m.Resource.Status.OperationContext.Task = "" + } + case strategy.RetryFromCurrent: + operationContext.TaskStatus = taskstatus.Pending + case strategy.Pause: + default: + m.Resource.Status.OperationContext = nil + if failureRule.NextTryStatus == "" { + m.Resource.Status.Status = constants.ClusterOpStatusFailed + } else { + m.Resource.Status.Status = apitypes.ClusterOperationStatus(failureRule.NextTryStatus) + } + } + } +} + +func (m *OBClusterOperationManager) FinishTask() { + m.Resource.Status.Status = apitypes.ClusterOperationStatus(m.Resource.Status.OperationContext.TargetStatus) + 
m.Resource.Status.OperationContext = nil +} + +func (m *OBClusterOperationManager) UpdateStatus() error { + return m.retryUpdateStatus() +} + +func (m *OBClusterOperationManager) ArchiveResource() { + m.Logger.Info("Archive obcluster operation", "obcluster operation", m.Resource.Name) + m.Recorder.Event(m.Resource, "Archive", "", "Archive obcluster operation") + m.Resource.Status.Status = constants.ClusterOpStatusFailed + m.Resource.Status.OperationContext = nil +} + +func (m *OBClusterOperationManager) GetTaskFunc(name tasktypes.TaskName) (tasktypes.TaskFunc, error) { + return taskMap.GetTask(name, m) +} + +func (m *OBClusterOperationManager) GetTaskFlow() (*tasktypes.TaskFlow, error) { + if m.Resource.Status.OperationContext != nil { + return tasktypes.NewTaskFlow(m.Resource.Status.OperationContext), nil + } + var taskFlow *tasktypes.TaskFlow + status := m.Resource.Status.Status + switch status { + case constants.ClusterOpStatusRunning: + if strings.EqualFold(string(m.Resource.Spec.Type), string(constants.ClusterOpTypeRestartOBServers)) && + m.Resource.Spec.RestartOBServers != nil { + taskFlow = genRestartOBServersOnlyFlow(m) + } else { + taskFlow = genModifySpecAndWatchFlow(m) + } + case constants.ClusterOpStatusPending, + constants.ClusterOpStatusSucceeded, + constants.ClusterOpStatusFailed: + fallthrough + default: + return nil, nil + } + if taskFlow.OperationContext.OnFailure.NextTryStatus == "" { + taskFlow.OperationContext.OnFailure.NextTryStatus = string(constants.TenantOpFailed) + } + return taskFlow, nil +} + +func (m *OBClusterOperationManager) PrintErrEvent(err error) { + m.Recorder.Event(m.Resource, corev1.EventTypeWarning, "Task failed", err.Error()) +} + +func (m *OBClusterOperationManager) retryUpdateStatus() error { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + resource := &v1alpha1.OBClusterOperation{} + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.GetNamespace(), + Name: m.Resource.GetName(), + }, resource) + if err != nil { + return client.IgnoreNotFound(err) + } + resource.Status = m.Resource.Status + return m.Client.Status().Update(m.Ctx, m.Resource) + }) +} diff --git a/internal/resource/obclusteroperation/obclusteroperation_task.go b/internal/resource/obclusteroperation/obclusteroperation_task.go new file mode 100644 index 000000000..84e9236c2 --- /dev/null +++ b/internal/resource/obclusteroperation/obclusteroperation_task.go @@ -0,0 +1,279 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+*/ + +package obclusteroperation + +import ( + "errors" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/oceanbase/ob-operator/api/constants" + apitypes "github.com/oceanbase/ob-operator/api/types" + v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" + oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" + clusterstatus "github.com/oceanbase/ob-operator/internal/const/status/obcluster" + serverstatus "github.com/oceanbase/ob-operator/internal/const/status/observer" + "github.com/oceanbase/ob-operator/pkg/task/builder" + tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" +) + +//go:generate task_register $GOFILE + +var taskMap = builder.NewTaskHub[*OBClusterOperationManager]() + +func ModifyClusterSpec(m *OBClusterOperationManager) tasktypes.TaskError { + obcluster := &v1alpha1.OBCluster{} + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: m.Resource.Spec.OBCluster, + }, obcluster) + if err != nil { + m.Logger.Error(err, "Failed to find obcluster") + return err + } + origin := obcluster.DeepCopy() + switch m.Resource.Spec.Type { + case constants.ClusterOpTypeAddZones: + if len(m.Resource.Spec.AddZones) == 0 { + return errors.New("AddZones is empty") + } + obcluster.Spec.Topology = append(obcluster.Spec.Topology, m.Resource.Spec.AddZones...) + case constants.ClusterOpTypeDeleteZones: + if len(m.Resource.Spec.DeleteZones) == 0 { + return errors.New("DeleteZones is empty") + } + deletingMap := make(map[string]struct{}) + for _, zone := range m.Resource.Spec.DeleteZones { + deletingMap[zone] = struct{}{} + } + remainList := make([]apitypes.OBZoneTopology, 0) + for i, t := range obcluster.Spec.Topology { + if _, ok := deletingMap[t.Zone]; !ok { + remainList = append(remainList, obcluster.Spec.Topology[i]) + } + } + obcluster.Spec.Topology = remainList + case constants.ClusterOpTypeAdjustReplicas: + if len(m.Resource.Spec.AdjustReplicas) == 0 { + return errors.New("AdjustReplicas is empty") + } + for _, adjust := range m.Resource.Spec.AdjustReplicas { + adjustingMap := make(map[string]struct{}) + for _, a := range adjust.Zones { + adjustingMap[a] = struct{}{} + } + for i, t := range obcluster.Spec.Topology { + if _, ok := adjustingMap[t.Zone]; ok { + if adjust.To > 0 { + obcluster.Spec.Topology[i].Replica = adjust.To + } + } + } + } + case constants.ClusterOpTypeRestartOBServers: + // This is not a real operation, just a placeholder for the task + case constants.ClusterOpTypeModifyOBServers: + if m.Resource.Spec.ModifyOBServers == nil { + return errors.New("modifyOBServers is empty") + } + if m.Resource.Spec.ModifyOBServers.ExpandStorageSize != nil { + mutation := m.Resource.Spec.ModifyOBServers.ExpandStorageSize + if mutation.DataStorage != nil { + obcluster.Spec.OBServerTemplate.Storage.DataStorage.Size = *mutation.DataStorage + } + if mutation.LogStorage != nil { + obcluster.Spec.OBServerTemplate.Storage.LogStorage.Size = *mutation.LogStorage + } + if mutation.RedoLogStorage != nil { + obcluster.Spec.OBServerTemplate.Storage.RedoLogStorage.Size = *mutation.RedoLogStorage + } + } + if m.Resource.Spec.ModifyOBServers.ModifyStorageClass != nil { + mutation := m.Resource.Spec.ModifyOBServers.ModifyStorageClass + if mutation.DataStorage != "" { + obcluster.Spec.OBServerTemplate.Storage.DataStorage.StorageClass = mutation.DataStorage + } + if mutation.LogStorage != "" { + 
obcluster.Spec.OBServerTemplate.Storage.LogStorage.StorageClass = mutation.LogStorage + } + if mutation.RedoLogStorage != "" { + obcluster.Spec.OBServerTemplate.Storage.RedoLogStorage.StorageClass = mutation.RedoLogStorage + } + } + supportStaticIP := obcluster.Annotations[oceanbaseconst.AnnotationsSupportStaticIP] == "true" + if m.Resource.Spec.ModifyOBServers.AddingMonitor != nil && supportStaticIP { + obcluster.Spec.MonitorTemplate = m.Resource.Spec.ModifyOBServers.AddingMonitor + } + if m.Resource.Spec.ModifyOBServers.AddingBackupVolume != nil && supportStaticIP { + obcluster.Spec.BackupVolume = m.Resource.Spec.ModifyOBServers.AddingBackupVolume + } + if m.Resource.Spec.ModifyOBServers.Resource != nil && supportStaticIP { + obcluster.Spec.OBServerTemplate.Resource = m.Resource.Spec.ModifyOBServers.Resource + } + case constants.ClusterOpTypeUpgrade: + if m.Resource.Spec.Upgrade == nil { + return errors.New("Upgrade is empty") + } + if m.Resource.Spec.Upgrade.Image == "" { + return errors.New("Upgrading image is empty") + } + obcluster.Spec.OBServerTemplate.Image = m.Resource.Spec.Upgrade.Image + case constants.ClusterOpTypeSetParameters: + if m.Resource.Spec.SetParameters == nil { + return errors.New("setParameters is empty") + } + newParamMap := make(map[string]string) + for _, v := range m.Resource.Spec.SetParameters { + newParamMap[v.Name] = v.Value + } + existingMap := make(map[string]struct{}) + for i, v := range obcluster.Spec.Parameters { + if val, ok := newParamMap[v.Name]; ok { + obcluster.Spec.Parameters[i].Value = val + } + existingMap[v.Name] = struct{}{} + } + for k, v := range newParamMap { + if _, ok := existingMap[k]; !ok { + obcluster.Spec.Parameters = append(obcluster.Spec.Parameters, apitypes.Parameter{Name: k, Value: v}) + } + } + } + if m.Resource.Spec.Force { + obcluster.Status.Status = clusterstatus.Running + obcluster.Status.OperationContext = nil + } else if obcluster.Status.Status != clusterstatus.Running { + return errors.New("obcluster is not running") + } + oldResourceVersion := obcluster.ResourceVersion + err = m.Client.Patch(m.Ctx, obcluster, client.MergeFrom(origin)) + if err != nil { + m.Logger.Error(err, "Failed to patch obcluster") + return err + } + newResourceVersion := obcluster.ResourceVersion + if oldResourceVersion == newResourceVersion { + m.Logger.Info("obcluster not changed") + return nil + } + err = m.waitForOBClusterStatusToMatch(obcfg.GetConfig().Time.DefaultStateWaitTimeout, func(status string) bool { + return status != clusterstatus.Running + }) + if err != nil { + return errors.New("Timeout to wait for cluster to be operating") + } + return nil +} + +func WaitForClusterReturnRunning(m *OBClusterOperationManager) tasktypes.TaskError { + timeout := obcfg.GetConfig().Time.DefaultStateWaitTimeout + if m.Resource.Spec.Type == constants.ClusterOpTypeModifyOBServers && + m.Resource.Spec.ModifyOBServers != nil && + m.Resource.Spec.ModifyOBServers.ModifyStorageClass != nil { + timeout = obcfg.GetConfig().Time.ServerDeleteTimeoutSeconds + } + err := m.waitForOBClusterStatusToMatch(timeout, func(status string) bool { + return status == clusterstatus.Running + }) + if err != nil { + return errors.New("Timeout to wait for cluster to be running") + } + return nil +} + +func RestartOBServers(m *OBClusterOperationManager) tasktypes.TaskError { + restartingServers := make([]v1alpha1.OBServer, 0) + var err error + obcluster := &v1alpha1.OBCluster{} + err = m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: 
m.Resource.Spec.OBCluster, + }, obcluster) + if err != nil { + m.Logger.Error(err, "Failed to find obcluster") + return err + } + if obcluster.Status.Status != clusterstatus.Running && !m.Resource.Spec.Force { + return errors.New("RestartOBServers requires obcluster to be running") + } + if obcluster.Annotations[oceanbaseconst.AnnotationsSupportStaticIP] != "true" { + return errors.New("RestartOBServers requires obcluster's support for static ip") + } + + observerList := v1alpha1.OBServerList{} + err = m.Client.List(m.Ctx, &observerList, client.InNamespace(m.Resource.Namespace), client.MatchingLabels{ + oceanbaseconst.LabelRefOBCluster: m.Resource.Spec.OBCluster, + }) + if err != nil { + m.Logger.Error(err, "Failed to list observers") + return err + } + + if m.Resource.Spec.RestartOBServers.All { + restartingServers = append(restartingServers, observerList.Items...) + } else if len(m.Resource.Spec.RestartOBServers.OBZones) > 0 { + filterZoneMap := make(map[string]struct{}) + for _, zone := range m.Resource.Spec.RestartOBServers.OBZones { + filterZoneMap[zone] = struct{}{} + } + for _, observer := range observerList.Items { + if _, ok := filterZoneMap[observer.Labels[oceanbaseconst.LabelRefOBZone]]; ok { + restartingServers = append(restartingServers, observer) + } + } + } else if len(m.Resource.Spec.RestartOBServers.OBServers) > 0 { + filterObserverMap := make(map[string]struct{}) + for _, observer := range m.Resource.Spec.RestartOBServers.OBServers { + filterObserverMap[observer] = struct{}{} + } + for _, observer := range observerList.Items { + if _, ok := filterObserverMap[observer.Name]; ok { + restartingServers = append(restartingServers, observer) + } + } + } + + for _, observer := range restartingServers { + pod := corev1.Pod{} + err = m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: observer.Namespace, + Name: observer.Name, + }, &pod) + if err != nil { + m.Logger.Error(err, "Failed to find pod") + return err + } + err = m.Client.Delete(m.Ctx, &pod) + if err != nil { + m.Logger.Error(err, "Failed to delete pod") + return err + } + timeout := obcfg.GetConfig().Time.ServerDeleteTimeoutSeconds + err = m.waitForOBServerStatusToMatch(observer.Name, timeout, func(status string) bool { + return status != serverstatus.Running + }) + if err != nil { + return errors.New("Timeout to wait for server to be operating") + } + err = m.waitForOBServerStatusToMatch(observer.Name, timeout, func(status string) bool { + return status == serverstatus.Running + }) + if err != nil { + return errors.New("Timeout to wait for server to be running") + } + } + return nil +} diff --git a/internal/resource/obclusteroperation/obclusteroperation_task_gen.go b/internal/resource/obclusteroperation/obclusteroperation_task_gen.go new file mode 100644 index 000000000..6c8879e99 --- /dev/null +++ b/internal/resource/obclusteroperation/obclusteroperation_task_gen.go @@ -0,0 +1,8 @@ +// Code generated by go generate; DO NOT EDIT. 
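For orientation, a sketch of a client that would exercise the AddZones branch of ModifyClusterSpec above. Field names follow the task code in this diff, but the OBClusterOperationSpec type name, the sample cluster and zone names, and the wrapper function are assumptions, not part of the patch; imports mirror those used in obclusteroperation_task.go plus context, client, and metav1.

// Hypothetical usage sketch; k8sClient is any controller-runtime client whose
// scheme has the oceanbase v1alpha1 types registered.
func addZone(ctx context.Context, k8sClient client.Client) error {
    op := &v1alpha1.OBClusterOperation{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "add-zone4", // example name
            Namespace: "oceanbase", // must match the target OBCluster's namespace
        },
        Spec: v1alpha1.OBClusterOperationSpec{ // spec type name assumed from the scaffold
            OBCluster: "test-cluster",                  // Spec.OBCluster: target cluster name
            Type:      constants.ClusterOpTypeAddZones, // selects the modify-spec-and-watch flow
            AddZones: []apitypes.OBZoneTopology{
                {Zone: "zone4", Replica: 1},
            },
        },
    }
    return k8sClient.Create(ctx, op)
}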
+package obclusteroperation + +func init() { + taskMap.Register(tModifyClusterSpec, ModifyClusterSpec) + taskMap.Register(tWaitForClusterReturnRunning, WaitForClusterReturnRunning) + taskMap.Register(tRestartOBServers, RestartOBServers) +} diff --git a/internal/resource/obclusteroperation/obclusteroperation_taskname_gen.go b/internal/resource/obclusteroperation/obclusteroperation_taskname_gen.go new file mode 100644 index 000000000..1eed8ec0d --- /dev/null +++ b/internal/resource/obclusteroperation/obclusteroperation_taskname_gen.go @@ -0,0 +1,10 @@ +// Code generated by go generate; DO NOT EDIT. +package obclusteroperation + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tModifyClusterSpec ttypes.TaskName = "modify cluster spec" + tWaitForClusterReturnRunning ttypes.TaskName = "wait for cluster return running" + tRestartOBServers ttypes.TaskName = "restart observers" +) diff --git a/internal/resource/obclusteroperation/utils.go b/internal/resource/obclusteroperation/utils.go new file mode 100644 index 000000000..28eeb41b6 --- /dev/null +++ b/internal/resource/obclusteroperation/utils.go @@ -0,0 +1,125 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package obclusteroperation + +import ( + "time" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + + v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils" + "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/operation" +) + +// get operation manager to exec sql +func (m *OBClusterOperationManager) getTenantRootClient(tenantName string) (*operation.OceanbaseOperationManager, error) { + tenant := &v1alpha1.OBTenant{} + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: tenantName, + }, tenant) + if err != nil { + return nil, errors.Wrap(err, "get tenant") + } + obcluster := &v1alpha1.OBCluster{} + err = m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: tenant.Spec.ClusterName, + }, obcluster) + if err != nil { + return nil, errors.Wrap(err, "get obcluster") + } + var con *operation.OceanbaseOperationManager + con, err = resourceutils.GetTenantRootOperationClient(m.Client, m.Logger, obcluster, tenant.Spec.TenantName, tenant.Status.Credentials.Root) + if err != nil { + return nil, errors.Wrap(err, "get oceanbase operation manager") + } + return con, nil +} + +func (m *OBClusterOperationManager) getClusterSysClient(clusterName string) (*operation.OceanbaseOperationManager, error) { + var err error + obcluster := &v1alpha1.OBCluster{} + err = m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: clusterName, + }, obcluster) + if err != nil { + return nil, errors.Wrap(err, "get obcluster") + } + con, err := resourceutils.GetSysOperationClient(m.Client, m.Logger, obcluster) + if err != nil { + return nil, errors.Wrap(err, "get cluster sys client") + } + return con, nil +} + +func (m *OBClusterOperationManager) retryUpdateTenant(obj 
*v1alpha1.OBTenant) error { + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + tenant := &v1alpha1.OBTenant{} + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: obj.Name, + }, tenant) + if err != nil { + return errors.Wrap(err, "get tenant") + } + tenant.Status = obj.Status + return m.Client.Status().Update(m.Ctx, tenant) + }) +} + +type matchFunc func(string) bool + +func (m *OBClusterOperationManager) waitForOBClusterStatusToMatch(timeout int, match matchFunc) error { + obcluster := &v1alpha1.OBCluster{} + for i := 0; i < timeout; i++ { + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: m.Resource.Spec.OBCluster, + }, obcluster) + if err != nil { + m.Logger.Error(err, "Failed to find obcluster") + return err + } + if match(obcluster.Status.Status) { + return nil + } + time.Sleep(time.Second) + } + m.Logger.WithValues("currentStatus", obcluster.Status.Status).Info("Timeout to wait for obcluster to match desired status") + return errors.New("Timeout to wait for obcluster to match desired status") +} + +func (m *OBClusterOperationManager) waitForOBServerStatusToMatch(server string, timeout int, match matchFunc) error { + observer := &v1alpha1.OBServer{} + for i := 0; i < timeout; i++ { + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.Namespace, + Name: server, + }, observer) + if err != nil { + m.Logger.Error(err, "Failed to find obcluster") + return err + } + if match(observer.Status.Status) { + return nil + } + time.Sleep(time.Second) + } + m.Logger.WithValues("currentStatus", observer.Status.Status).Info("Timeout to wait for observer to match desired status") + return errors.New("Timeout to wait for observer to match desired status") +} diff --git a/internal/resource/obparameter/names.go b/internal/resource/obparameter/names.go deleted file mode 100644 index d8f01ebfd..000000000 --- a/internal/resource/obparameter/names.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
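Both wait helpers above follow the same once-per-second polling loop; extracted as a standalone sketch (names invented here, not part of the patch), the shared pattern is:

// waitForStatus polls get() once per second until match reports true or the
// timeout elapses, mirroring waitForOBClusterStatusToMatch and
// waitForOBServerStatusToMatch above. Uses the standard "time" and "errors" packages.
func waitForStatus(timeoutSeconds int, get func() (string, error), match func(string) bool) error {
    for i := 0; i < timeoutSeconds; i++ {
        status, err := get()
        if err != nil {
            return err
        }
        if match(status) {
            return nil
        }
        time.Sleep(time.Second)
    }
    return errors.New("timeout waiting for status to match")
}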
-*/ - -package obparameter - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -// obparameter flows -const ( - fSetOBParameter ttypes.FlowName = "set ob parameter" -) - -// obparameter tasks -const ( - tSetOBParameter ttypes.TaskName = "set ob parameter" -) diff --git a/internal/resource/obparameter/obparameter_flow.go b/internal/resource/obparameter/obparameter_flow.go index 4ca7956f5..7eb01e381 100644 --- a/internal/resource/obparameter/obparameter_flow.go +++ b/internal/resource/obparameter/obparameter_flow.go @@ -20,7 +20,7 @@ import ( func genSetOBParameterFlow(_ *OBParameterManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fSetOBParameter, + Name: "set ob parameter", Tasks: []tasktypes.TaskName{tSetOBParameter}, TargetStatus: parameterstatus.Matched, }, diff --git a/internal/resource/obparameter/obparameter_task.go b/internal/resource/obparameter/obparameter_task.go index 76f083ad3..7eb341b6f 100644 --- a/internal/resource/obparameter/obparameter_task.go +++ b/internal/resource/obparameter/obparameter_task.go @@ -19,7 +19,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*OBParameterManager]() diff --git a/internal/resource/obparameter/obparameter_taskname_gen.go b/internal/resource/obparameter/obparameter_taskname_gen.go new file mode 100644 index 000000000..fb64b4462 --- /dev/null +++ b/internal/resource/obparameter/obparameter_taskname_gen.go @@ -0,0 +1,8 @@ +// Code generated by go generate; DO NOT EDIT. +package obparameter + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tSetOBParameter ttypes.TaskName = "set obparameter" +) diff --git a/internal/resource/observer/names.go b/internal/resource/observer/names.go deleted file mode 100644 index 13da973a4..000000000 --- a/internal/resource/observer/names.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
-*/ - -package observer - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -// observer flows -const ( - fPrepareOBServerForBootstrap ttypes.FlowName = "prepare observer for bootstrap" - fMaintainOBServerAfterBootstrap ttypes.FlowName = "maintain observer after bootstrap" - fCreateOBServer ttypes.FlowName = "create observer" - fDeleteOBServerFinalizer ttypes.FlowName = "delete observer finalizer" - fUpgradeOBServer ttypes.FlowName = "upgrade observer" - fRecoverOBServer ttypes.FlowName = "recover observer" - fAnnotateOBServerPod ttypes.FlowName = "annotate observer pod" - fScaleUpOBServer ttypes.FlowName = "scale up observer" - fExpandPVC ttypes.FlowName = "expand pvc for observer" - fMountBackupVolume ttypes.FlowName = "mount backup volume for observer" -) - -// observer tasks -const ( - tWaitOBClusterBootstrapped ttypes.TaskName = "wait obcluster bootstrapped" - tCreateOBServerSvc ttypes.TaskName = "create observer svc" - tCreateOBPVC ttypes.TaskName = "create observer pvc" - tCreateOBPod ttypes.TaskName = "create observer pod" - tAnnotateOBServerPod ttypes.TaskName = "annotate observer pod" - tWaitOBServerReady ttypes.TaskName = "wait observer ready" - tStartOBServer ttypes.TaskName = "start observer" - tAddServer ttypes.TaskName = "add observer" - tDeleteOBServerInCluster ttypes.TaskName = "delete observer in cluster" - tWaitOBServerDeletedInCluster ttypes.TaskName = "wait observer deleted in cluster" - tWaitOBServerPodReady ttypes.TaskName = "wait observer pod ready" - tWaitOBServerActiveInCluster ttypes.TaskName = "wait observer active in cluster" - tUpgradeOBServerImage ttypes.TaskName = "upgrade observer image" - tDeletePod ttypes.TaskName = "delete pod" - tWaitForPodDeleted ttypes.TaskName = "wait for pod being deleted" - tExpandPVC ttypes.TaskName = "expand pvc" - tWaitForPVCResized ttypes.TaskName = "wait for pvc being resized" - tMountBackupVolume ttypes.TaskName = "mount backup volume" - tWaitForBackupVolumeMounted ttypes.TaskName = "wait for backup volume to be mounted" -) diff --git a/internal/resource/observer/observer_flow.go b/internal/resource/observer/observer_flow.go index 6d407db5f..8876c021c 100644 --- a/internal/resource/observer/observer_flow.go +++ b/internal/resource/observer/observer_flow.go @@ -21,8 +21,8 @@ import ( func genPrepareOBServerForBootstrapFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fPrepareOBServerForBootstrap, - Tasks: []tasktypes.TaskName{tCreateOBServerSvc, tCreateOBPVC, tCreateOBPod, tWaitOBServerReady}, + Name: "prepare observer for bootstrap", + Tasks: []tasktypes.TaskName{tCreateOBServerSvc, tCreateOBServerPVC, tCreateOBServerPod, tWaitOBServerReady}, TargetStatus: serverstatus.BootstrapReady, }, } @@ -31,7 +31,7 @@ func genPrepareOBServerForBootstrapFlow(_ *OBServerManager) *tasktypes.TaskFlow func genMaintainOBServerAfterBootstrapFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainOBServerAfterBootstrap, + Name: "maintain observer after bootstrap", Tasks: []tasktypes.TaskName{tWaitOBClusterBootstrapped, tAddServer, tWaitOBServerActiveInCluster}, TargetStatus: serverstatus.Running, }, @@ -41,8 +41,8 @@ func genMaintainOBServerAfterBootstrapFlow(_ *OBServerManager) *tasktypes.TaskFl func genCreateOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: 
fCreateOBServer, - Tasks: []tasktypes.TaskName{tCreateOBServerSvc, tCreateOBPVC, tCreateOBPod, tWaitOBServerReady, tAddServer, tWaitOBServerActiveInCluster}, + Name: "create observer", + Tasks: []tasktypes.TaskName{tCreateOBServerSvc, tCreateOBServerPVC, tCreateOBServerPod, tWaitOBServerReady, tAddServer, tWaitOBServerActiveInCluster}, TargetStatus: serverstatus.Running, OnFailure: tasktypes.FailureRule{ NextTryStatus: "Failed", @@ -54,7 +54,7 @@ func genCreateOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { func genDeleteOBServerFinalizerFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fDeleteOBServerFinalizer, + Name: "delete observer finalizer", Tasks: []tasktypes.TaskName{tDeleteOBServerInCluster, tWaitOBServerDeletedInCluster}, TargetStatus: serverstatus.FinalizerFinished, }, @@ -64,7 +64,7 @@ func genDeleteOBServerFinalizerFlow(_ *OBServerManager) *tasktypes.TaskFlow { func genUpgradeOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fUpgradeOBServer, + Name: "upgrade observer", Tasks: []tasktypes.TaskName{tUpgradeOBServerImage, tWaitOBServerPodReady, tWaitOBServerActiveInCluster}, TargetStatus: serverstatus.Running, }, @@ -74,8 +74,8 @@ func genUpgradeOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { func genRecoverOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRecoverOBServer, - Tasks: []tasktypes.TaskName{tCreateOBPod, tWaitOBServerReady, tAddServer, tWaitOBServerActiveInCluster}, + Name: "recover observer", + Tasks: []tasktypes.TaskName{tCreateOBServerPod, tWaitOBServerReady, tAddServer, tWaitOBServerActiveInCluster}, TargetStatus: serverstatus.Running, OnFailure: tasktypes.FailureRule{ Strategy: strategy.RetryFromCurrent, @@ -87,18 +87,18 @@ func genRecoverOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { func genAnnotateOBServerPodFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fAnnotateOBServerPod, + Name: "annotate observer pod", Tasks: []tasktypes.TaskName{tAnnotateOBServerPod}, TargetStatus: serverstatus.Running, }, } } -func genScaleUpOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { +func genScaleOBServerVerticallyFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fScaleUpOBServer, - Tasks: []tasktypes.TaskName{tDeletePod, tWaitForPodDeleted, tCreateOBPod, tWaitOBServerReady, tWaitOBServerActiveInCluster}, + Name: "scale observer vertically", + Tasks: []tasktypes.TaskName{tDeletePod, tWaitForPodDeleted, tCreateOBServerPod, tWaitOBServerReady, tWaitOBServerActiveInCluster}, TargetStatus: serverstatus.Running, }, } @@ -107,8 +107,8 @@ func genScaleUpOBServerFlow(_ *OBServerManager) *tasktypes.TaskFlow { func genExpandPVCFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fExpandPVC, - Tasks: []tasktypes.TaskName{tExpandPVC, tWaitForPVCResized}, + Name: "expand pvc", + Tasks: []tasktypes.TaskName{tExpandPVC, tWaitForPvcResized}, TargetStatus: serverstatus.Running, OnFailure: tasktypes.FailureRule{ Strategy: strategy.StartOver, @@ -117,11 +117,11 @@ func genExpandPVCFlow(_ *OBServerManager) *tasktypes.TaskFlow { } } -func genMountBackupVolumeFlow(_ *OBServerManager) 
*tasktypes.TaskFlow { +func genModifyPodTemplateFlow(_ *OBServerManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMountBackupVolume, - Tasks: []tasktypes.TaskName{tDeletePod, tWaitForPodDeleted, tCreateOBPod, tWaitOBServerReady}, + Name: "modify pod template", + Tasks: []tasktypes.TaskName{tDeletePod, tWaitForPodDeleted, tCreateOBServerPod, tWaitOBServerReady}, TargetStatus: serverstatus.Running, OnFailure: tasktypes.FailureRule{ Strategy: strategy.StartOver, diff --git a/internal/resource/observer/observer_manager.go b/internal/resource/observer/observer_manager.go index ff57041c3..af0fb4539 100644 --- a/internal/resource/observer/observer_manager.go +++ b/internal/resource/observer/observer_manager.go @@ -115,17 +115,14 @@ func (m *OBServerManager) UpdateStatus() error { if m.OBServer.Status.Status == serverstatus.Running { m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Check observer in obcluster") if m.OBServer.SupportStaticIP() { - if len(pod.Spec.Containers) > 0 { - tmplRes := m.OBServer.Spec.OBServerTemplate.Resource - containerRes := pod.Spec.Containers[0].Resources.Limits - if containerRes.Cpu().Cmp(tmplRes.Cpu) != 0 || containerRes.Memory().Cmp(tmplRes.Memory) != 0 { - m.OBServer.Status.Status = serverstatus.ScaleUp - } + if m.checkIfResourceChanged(pod) { + m.OBServer.Status.Status = serverstatus.ScaleVertically + } + if m.checkIfBackupVolumeMutated(pod) || m.checkIfMonitorMutated(pod) { + m.OBServer.Status.Status = serverstatus.ModifyingPodTemplate } } else if pvcs != nil && len(pvcs.Items) > 0 && m.checkIfStorageExpand(pvcs) { m.OBServer.Status.Status = serverstatus.ExpandPVC - } else if m.checkIfBackupVolumeAdded(pod) { - m.OBServer.Status.Status = serverstatus.MountBackupVolume } } @@ -222,13 +219,12 @@ func (m *OBServerManager) GetTaskFlow() (*tasktypes.TaskFlow, error) { taskFlow = genRecoverOBServerFlow(m) case serverstatus.Annotate: taskFlow = genAnnotateOBServerPodFlow(m) - case serverstatus.ScaleUp: - taskFlow = genScaleUpOBServerFlow(m) + case serverstatus.ScaleVertically: + taskFlow = genScaleOBServerVerticallyFlow(m) case serverstatus.ExpandPVC: taskFlow = genExpandPVCFlow(m) - case serverstatus.MountBackupVolume: - m.Logger.V(oceanbaseconst.LogLevelTrace).Info("Get task flow when observer need to mount backup volume") - taskFlow = genMountBackupVolumeFlow(m) + case serverstatus.ModifyingPodTemplate: + taskFlow = genModifyPodTemplateFlow(m) default: m.Logger.V(oceanbaseconst.LogLevelTrace).Info("No need to run anything for observer") return nil, nil diff --git a/internal/resource/observer/observer_task.go b/internal/resource/observer/observer_task.go index 41b5f7fd7..16d428bb4 100644 --- a/internal/resource/observer/observer_task.go +++ b/internal/resource/observer/observer_task.go @@ -35,7 +35,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*OBServerManager]() @@ -97,7 +97,7 @@ func WaitOBClusterBootstrapped(m *OBServerManager) tasktypes.TaskError { return errors.New("Timeout to wait obcluster bootstrapped") } -func CreateOBPod(m *OBServerManager) tasktypes.TaskError { +func CreateOBServerPod(m *OBServerManager) tasktypes.TaskError { m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Create observer pod") obcluster, err := m.getOBCluster() if err != nil { @@ -135,7 +135,7 @@ func CreateOBPod(m *OBServerManager) tasktypes.TaskError { return nil } -func CreateOBPVC(m 
*OBServerManager) tasktypes.TaskError { +func CreateOBServerPVC(m *OBServerManager) tasktypes.TaskError { ownerReferenceList := make([]metav1.OwnerReference, 0) sepVolumeAnnoVal, sepVolumeAnnoExist := resourceutils.GetAnnotationField(m.OBServer, oceanbaseconst.AnnotationsIndependentPVCLifecycle) if !sepVolumeAnnoExist || sepVolumeAnnoVal != "true" { @@ -441,7 +441,7 @@ func ExpandPVC(m *OBServerManager) tasktypes.TaskError { return nil } -func WaitForPVCResized(m *OBServerManager) tasktypes.TaskError { +func WaitForPvcResized(m *OBServerManager) tasktypes.TaskError { outer: for i := 0; i < obcfg.GetConfig().Time.DefaultStateWaitTimeout; i++ { time.Sleep(time.Second) @@ -524,11 +524,3 @@ func CreateOBServerSvc(m *OBServerManager) tasktypes.TaskError { } return nil } - -func MountBackupVolume(_ *OBServerManager) tasktypes.TaskError { - return nil -} - -func WaitForBackupVolumeMounted(_ *OBServerManager) tasktypes.TaskError { - return nil -} diff --git a/internal/resource/observer/observer_task_gen.go b/internal/resource/observer/observer_task_gen.go index c3e582a2a..80b34cf47 100644 --- a/internal/resource/observer/observer_task_gen.go +++ b/internal/resource/observer/observer_task_gen.go @@ -5,8 +5,8 @@ func init() { taskMap.Register(tWaitOBServerReady, WaitOBServerReady) taskMap.Register(tAddServer, AddServer) taskMap.Register(tWaitOBClusterBootstrapped, WaitOBClusterBootstrapped) - taskMap.Register(tCreateOBPod, CreateOBPod) - taskMap.Register(tCreateOBPVC, CreateOBPVC) + taskMap.Register(tCreateOBServerPod, CreateOBServerPod) + taskMap.Register(tCreateOBServerPVC, CreateOBServerPVC) taskMap.Register(tDeleteOBServerInCluster, DeleteOBServerInCluster) taskMap.Register(tAnnotateOBServerPod, AnnotateOBServerPod) taskMap.Register(tUpgradeOBServerImage, UpgradeOBServerImage) @@ -16,8 +16,6 @@ func init() { taskMap.Register(tDeletePod, DeletePod) taskMap.Register(tWaitForPodDeleted, WaitForPodDeleted) taskMap.Register(tExpandPVC, ExpandPVC) - taskMap.Register(tWaitForPVCResized, WaitForPVCResized) + taskMap.Register(tWaitForPvcResized, WaitForPvcResized) taskMap.Register(tCreateOBServerSvc, CreateOBServerSvc) - taskMap.Register(tMountBackupVolume, MountBackupVolume) - taskMap.Register(tWaitForBackupVolumeMounted, WaitForBackupVolumeMounted) } diff --git a/internal/resource/observer/observer_taskname_gen.go b/internal/resource/observer/observer_taskname_gen.go new file mode 100644 index 000000000..8fda9cddd --- /dev/null +++ b/internal/resource/observer/observer_taskname_gen.go @@ -0,0 +1,23 @@ +// Code generated by go generate; DO NOT EDIT. 
+package observer + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tWaitOBServerReady ttypes.TaskName = "wait observer ready" + tAddServer ttypes.TaskName = "add server" + tWaitOBClusterBootstrapped ttypes.TaskName = "wait obcluster bootstrapped" + tCreateOBServerPod ttypes.TaskName = "create observer pod" + tCreateOBServerPVC ttypes.TaskName = "create observer pvc" + tDeleteOBServerInCluster ttypes.TaskName = "delete observer in cluster" + tAnnotateOBServerPod ttypes.TaskName = "annotate observer pod" + tUpgradeOBServerImage ttypes.TaskName = "upgrade observer image" + tWaitOBServerPodReady ttypes.TaskName = "wait observer pod ready" + tWaitOBServerActiveInCluster ttypes.TaskName = "wait observer active in cluster" + tWaitOBServerDeletedInCluster ttypes.TaskName = "wait observer deleted in cluster" + tDeletePod ttypes.TaskName = "delete pod" + tWaitForPodDeleted ttypes.TaskName = "wait for pod deleted" + tExpandPVC ttypes.TaskName = "expand pvc" + tWaitForPvcResized ttypes.TaskName = "wait for pvc resized" + tCreateOBServerSvc ttypes.TaskName = "create observer svc" +) diff --git a/internal/resource/observer/utils.go b/internal/resource/observer/utils.go index 55f35d078..5f7a61e74 100644 --- a/internal/resource/observer/utils.go +++ b/internal/resource/observer/utils.go @@ -175,19 +175,50 @@ func (m *OBServerManager) checkIfStorageExpand(pvcs *corev1.PersistentVolumeClai return false } -func (m *OBServerManager) checkIfBackupVolumeAdded(pod *corev1.Pod) bool { - if m.OBServer.Spec.BackupVolume != nil && m.OBServer.Spec.BackupVolume.Volume != nil { - // If the backup volume is not mounted, it means the backup volume is added - for _, volume := range pod.Spec.Volumes { - if volume.Name == m.OBServer.Spec.BackupVolume.Volume.Name { - return false +func (m *OBServerManager) checkIfResourceChanged(pod *corev1.Pod) bool { + if len(pod.Spec.Containers) > 0 { + tmplRes := m.OBServer.Spec.OBServerTemplate.Resource + for i, container := range pod.Spec.Containers { + if container.Name == oceanbaseconst.ContainerName { + containerRes := pod.Spec.Containers[i].Resources.Limits + if containerRes.Cpu().Cmp(tmplRes.Cpu) != 0 || containerRes.Memory().Cmp(tmplRes.Memory) != 0 { + return true + } } } - return true } return false } +func (m *OBServerManager) checkIfBackupVolumeMutated(pod *corev1.Pod) bool { + addingVolume := m.OBServer.Spec.BackupVolume != nil + volumeExist := false + + for _, container := range pod.Spec.Containers { + if container.Name == oceanbaseconst.ContainerName { + for _, volumeMount := range container.VolumeMounts { + if volumeMount.MountPath == oceanbaseconst.BackupPath { + volumeExist = true + break + } + } + } + } + + return addingVolume != volumeExist +} + +func (m *OBServerManager) checkIfMonitorMutated(pod *corev1.Pod) bool { + addingMonitor := m.OBServer.Spec.MonitorTemplate != nil + monitorExist := false + for _, container := range pod.Spec.Containers { + if container.Name == obagentconst.ContainerName { + monitorExist = true + } + } + return addingMonitor != monitorExist +} + func (m *OBServerManager) generatePVCSpec(storageSpec *apitypes.StorageSpec) corev1.PersistentVolumeClaimSpec { pvcSpec := &corev1.PersistentVolumeClaimSpec{} requestsResources := corev1.ResourceList{} @@ -205,7 +236,6 @@ func (m *OBServerManager) createOBPodSpec(obcluster *v1alpha1.OBCluster) corev1. 
observerContainer := m.createOBServerContainer(obcluster) containers = append(containers, observerContainer) - // TODO, add monitor container volumes := make([]corev1.Volume, 0) singlePvcAnnoVal, singlePvcExist := resourceutils.GetAnnotationField(m.OBServer, oceanbaseconst.AnnotationsSinglePVC) diff --git a/internal/resource/obtenant/names.go b/internal/resource/obtenant/names.go deleted file mode 100644 index 0b7361d21..000000000 --- a/internal/resource/obtenant/names.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -*/ - -package obtenant - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -const ( - fMaintainWhiteList ttypes.FlowName = "maintain white list" - fMaintainCharset ttypes.FlowName = "maintain charset" - fMaintainUnitNum ttypes.FlowName = "maintain unit num" - fMaintainLocality ttypes.FlowName = "maintain locality" - fMaintainPrimaryZone ttypes.FlowName = "maintain primary zone" - fMaintainUnitConfig ttypes.FlowName = "maintain unit config" - - fCreateTenant ttypes.FlowName = "create tenant" - fAddPool ttypes.FlowName = "add pool" - fDeletePool ttypes.FlowName = "delete pool" - fDeleteTenant ttypes.FlowName = "delete tenant" - fRestoreTenant ttypes.FlowName = "Restore tenant" - fCancelRestore ttypes.FlowName = "cancel restore" - fCreateEmptyStandbyTenant ttypes.FlowName = "create empty standby tenant" -) - -const ( - tCheckTenant ttypes.TaskName = "create tenant check" - tCheckPoolAndConfig ttypes.TaskName = "create pool and unit config check" - tCreateTenantWithClear ttypes.TaskName = "create tenant" - tCreateResourcePoolAndConfig ttypes.TaskName = "create resource pool and unit config" - tCheckAndApplyWhiteList ttypes.TaskName = "maintain white list" - tCheckAndApplyCharset ttypes.TaskName = "maintain charset" - tCheckAndApplyUnitNum ttypes.TaskName = "maintain unit num" - tCheckAndApplyLocality ttypes.TaskName = "maintain locality" - tCheckAndApplyPrimaryZone ttypes.TaskName = "maintain primary zone" - tAddPool ttypes.TaskName = "add resource pool" - tDeletePool ttypes.TaskName = "delete resource pool" - tMaintainUnitConfig ttypes.TaskName = "maintain unit config" - tDeleteTenant ttypes.TaskName = "delete tenant" - tCreateTenantRestoreJobCR ttypes.TaskName = "create restore job CR" - tWatchRestoreJobToFinish ttypes.TaskName = "watch restore job to finish" - tCancelTenantRestoreJob ttypes.TaskName = "cancel restore job" - tCreateUserWithCredentialSecrets ttypes.TaskName = "create users by credentials" - tCheckPrimaryTenantLSIntegrity ttypes.TaskName = "check primary tenant ls integrity" - tCreateEmptyStandbyTenant ttypes.TaskName = "create empty standby tenant" - tUpgradeTenantIfNeeded ttypes.TaskName = "upgrade tenant if needed" -) diff --git a/internal/resource/obtenant/obtenant_flow.go b/internal/resource/obtenant/obtenant_flow.go index 973481b65..4be339e78 100644 --- a/internal/resource/obtenant/obtenant_flow.go +++ b/internal/resource/obtenant/obtenant_flow.go @@ -21,7 +21,7 @@ import ( func genCreateTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return 
&tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fCreateTenant, + Name: "create tenant", Tasks: []tasktypes.TaskName{ tCheckTenant, tCheckPoolAndConfig, @@ -40,7 +40,7 @@ func genCreateTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genMaintainWhiteListFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainWhiteList, + Name: "maintain white list", Tasks: []tasktypes.TaskName{tCheckAndApplyWhiteList}, TargetStatus: tenantstatus.Running, }, @@ -50,7 +50,7 @@ func genMaintainWhiteListFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genMaintainCharsetFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainCharset, + Name: "maintain charset", Tasks: []tasktypes.TaskName{tCheckAndApplyCharset}, TargetStatus: tenantstatus.Running, }, @@ -60,7 +60,7 @@ func genMaintainCharsetFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genMaintainUnitNumFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainUnitNum, + Name: "maintain unit num", Tasks: []tasktypes.TaskName{tCheckAndApplyUnitNum}, TargetStatus: tenantstatus.Running, }, @@ -70,7 +70,7 @@ func genMaintainUnitNumFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genMaintainLocalityFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainLocality, + Name: "maintain locality", Tasks: []tasktypes.TaskName{tCheckAndApplyLocality}, TargetStatus: tenantstatus.Running, }, @@ -80,7 +80,7 @@ func genMaintainLocalityFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genMaintainPrimaryZoneFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainPrimaryZone, + Name: "maintain primary zone", Tasks: []tasktypes.TaskName{tCheckAndApplyPrimaryZone}, TargetStatus: tenantstatus.Running, }, @@ -90,7 +90,7 @@ func genMaintainPrimaryZoneFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genAddPoolFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fAddPool, + Name: "add pool", Tasks: []tasktypes.TaskName{tCheckPoolAndConfig, tAddPool}, TargetStatus: tenantstatus.Running, OnFailure: tasktypes.FailureRule{ @@ -103,7 +103,7 @@ func genAddPoolFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genDeletePoolFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fDeletePool, + Name: "delete pool", Tasks: []tasktypes.TaskName{tDeletePool}, TargetStatus: tenantstatus.Running, }, @@ -113,7 +113,7 @@ func genDeletePoolFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genMaintainUnitConfigFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainUnitConfig, + Name: "maintain unit config", Tasks: []tasktypes.TaskName{tMaintainUnitConfig}, TargetStatus: tenantstatus.Running, }, @@ -123,7 +123,7 @@ func genMaintainUnitConfigFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genDeleteTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fDeleteTenant, + Name: "delete tenant", Tasks: []tasktypes.TaskName{tDeleteTenant}, 
TargetStatus: tenantstatus.FinalizerFinished, OnFailure: tasktypes.FailureRule{ @@ -136,7 +136,7 @@ func genDeleteTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genRestoreTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRestoreTenant, + Name: "restore tenant", Tasks: []tasktypes.TaskName{ tCheckTenant, tCheckPoolAndConfig, @@ -157,7 +157,7 @@ func genRestoreTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genCancelRestoreFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fCancelRestore, + Name: "cancel restore", Tasks: []tasktypes.TaskName{ tCancelTenantRestoreJob, }, @@ -169,9 +169,9 @@ func genCancelRestoreFlow(_ *OBTenantManager) *tasktypes.TaskFlow { func genCreateEmptyStandbyTenantFlow(_ *OBTenantManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fCreateEmptyStandbyTenant, + Name: "create empty standby tenant", Tasks: []tasktypes.TaskName{ - tCheckPrimaryTenantLSIntegrity, + tCheckPrimaryTenantLsIntegrity, tCheckTenant, tCheckPoolAndConfig, tCreateResourcePoolAndConfig, diff --git a/internal/resource/obtenant/obtenant_task.go b/internal/resource/obtenant/obtenant_task.go index 8ef16102f..ca604d1bb 100644 --- a/internal/resource/obtenant/obtenant_task.go +++ b/internal/resource/obtenant/obtenant_task.go @@ -35,7 +35,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*OBTenantManager]() @@ -238,7 +238,7 @@ func CreateEmptyStandbyTenant(m *OBTenantManager) tasktypes.TaskError { return nil } -func CheckPrimaryTenantLSIntegrity(m *OBTenantManager) tasktypes.TaskError { +func CheckPrimaryTenantLsIntegrity(m *OBTenantManager) tasktypes.TaskError { var err error if m.OBTenant.Spec.Source == nil || m.OBTenant.Spec.Source.Tenant == nil { return errors.New("Primary tenant must have source tenant") diff --git a/internal/resource/obtenant/obtenant_task_gen.go b/internal/resource/obtenant/obtenant_task_gen.go index 41514ad2d..5f739ae8d 100644 --- a/internal/resource/obtenant/obtenant_task_gen.go +++ b/internal/resource/obtenant/obtenant_task_gen.go @@ -12,7 +12,7 @@ func init() { taskMap.Register(tDeleteTenant, DeleteTenant) taskMap.Register(tCheckAndApplyCharset, CheckAndApplyCharset) taskMap.Register(tCreateEmptyStandbyTenant, CreateEmptyStandbyTenant) - taskMap.Register(tCheckPrimaryTenantLSIntegrity, CheckPrimaryTenantLSIntegrity) + taskMap.Register(tCheckPrimaryTenantLsIntegrity, CheckPrimaryTenantLsIntegrity) taskMap.Register(tCreateTenantRestoreJobCR, CreateTenantRestoreJobCR) taskMap.Register(tWatchRestoreJobToFinish, WatchRestoreJobToFinish) taskMap.Register(tCancelTenantRestoreJob, CancelTenantRestoreJob) diff --git a/internal/resource/obtenant/obtenant_taskname_gen.go b/internal/resource/obtenant/obtenant_taskname_gen.go new file mode 100644 index 000000000..ceb301603 --- /dev/null +++ b/internal/resource/obtenant/obtenant_taskname_gen.go @@ -0,0 +1,27 @@ +// Code generated by go generate; DO NOT EDIT. 
+package obtenant + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tCheckTenant ttypes.TaskName = "check tenant" + tCheckPoolAndConfig ttypes.TaskName = "check pool and config" + tCreateTenantWithClear ttypes.TaskName = "create tenant with clear" + tCreateResourcePoolAndConfig ttypes.TaskName = "create resource pool and config" + tAddPool ttypes.TaskName = "add pool" + tDeletePool ttypes.TaskName = "delete pool" + tMaintainUnitConfig ttypes.TaskName = "maintain unit config" + tDeleteTenant ttypes.TaskName = "delete tenant" + tCheckAndApplyCharset ttypes.TaskName = "check and apply charset" + tCreateEmptyStandbyTenant ttypes.TaskName = "create empty standby tenant" + tCheckPrimaryTenantLsIntegrity ttypes.TaskName = "check primary tenant ls integrity" + tCreateTenantRestoreJobCR ttypes.TaskName = "create tenant restore job cr" + tWatchRestoreJobToFinish ttypes.TaskName = "watch restore job to finish" + tCancelTenantRestoreJob ttypes.TaskName = "cancel tenant restore job" + tUpgradeTenantIfNeeded ttypes.TaskName = "upgrade tenant if needed" + tCheckAndApplyUnitNum ttypes.TaskName = "check and apply unit num" + tCheckAndApplyWhiteList ttypes.TaskName = "check and apply white list" + tCheckAndApplyPrimaryZone ttypes.TaskName = "check and apply primary zone" + tCheckAndApplyLocality ttypes.TaskName = "check and apply locality" + tCreateUserWithCredentialSecrets ttypes.TaskName = "create user with credential secrets" +) diff --git a/internal/resource/obtenantbackup/obtenantbackup_flow.go b/internal/resource/obtenantbackup/obtenantbackup_flow.go index af336722a..7387d8007 100644 --- a/internal/resource/obtenantbackup/obtenantbackup_flow.go +++ b/internal/resource/obtenantbackup/obtenantbackup_flow.go @@ -21,7 +21,7 @@ import ( func genCreateBackupJobInDBFlow(_ *OBTenantBackupManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fCreateBackupJobInOB, + Name: "create backup job in ob", Tasks: []tasktypes.TaskName{tCreateBackupJobInOB}, TargetStatus: string(constants.BackupPolicyStatusRunning), OnFailure: tasktypes.FailureRule{ diff --git a/internal/resource/obtenantbackup/obtenantbackup_task.go b/internal/resource/obtenantbackup/obtenantbackup_task.go index 8028ea246..96ce38d78 100644 --- a/internal/resource/obtenantbackup/obtenantbackup_task.go +++ b/internal/resource/obtenantbackup/obtenantbackup_task.go @@ -18,7 +18,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*OBTenantBackupManager]() diff --git a/internal/resource/obtenantbackup/obtenantbackup_taskname_gen.go b/internal/resource/obtenantbackup/obtenantbackup_taskname_gen.go new file mode 100644 index 000000000..5c15d3ffa --- /dev/null +++ b/internal/resource/obtenantbackup/obtenantbackup_taskname_gen.go @@ -0,0 +1,8 @@ +// Code generated by go generate; DO NOT EDIT. +package obtenantbackup + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tCreateBackupJobInOB ttypes.TaskName = "create backup job in ob" +) diff --git a/internal/resource/obtenantbackuppolicy/names.go b/internal/resource/obtenantbackuppolicy/names.go deleted file mode 100644 index 5ed817fe2..000000000 --- a/internal/resource/obtenantbackuppolicy/names.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. 
-You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -*/ - -package obtenantbackuppolicy - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -const ( - fPrepareBackupPolicy ttypes.FlowName = "prepare backup policy" - fStartBackupJob ttypes.FlowName = "start backup job" - fStopBackupPolicy ttypes.FlowName = "stop backup policy" - fMaintainRunningPolicy ttypes.FlowName = "maintain running policy" - fPauseBackup ttypes.FlowName = "pause backup" - fResumeBackup ttypes.FlowName = "resume backup" -) - -const ( - tConfigureServerForBackup ttypes.TaskName = "configure server for backup" - tCheckAndSpawnJobs ttypes.TaskName = "check and spawn jobs" - tStartBackup ttypes.TaskName = "start backup job" - tStopBackup ttypes.TaskName = "stop backup policy" - tCleanOldBackupJobs ttypes.TaskName = "clean old backup jobs" - tPauseBackup ttypes.TaskName = "pause backup" - tResumeBackup ttypes.TaskName = "resume backup" - tDeleteBackupPolicy ttypes.TaskName = "delete backup policy" -) diff --git a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_flow.go b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_flow.go index c0ca87c22..fa4482d92 100644 --- a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_flow.go +++ b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_flow.go @@ -20,7 +20,7 @@ import ( func genPrepareBackupPolicyFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fPrepareBackupPolicy, + Name: "prepare backup policy", Tasks: []tasktypes.TaskName{tConfigureServerForBackup}, TargetStatus: string(constants.BackupPolicyStatusPrepared), OnFailure: tasktypes.FailureRule{ @@ -33,7 +33,7 @@ func genPrepareBackupPolicyFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskF func genStartBackupJobFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fStartBackupJob, + Name: "start backup job", Tasks: []tasktypes.TaskName{tStartBackup}, TargetStatus: string(constants.BackupPolicyStatusRunning), OnFailure: tasktypes.FailureRule{ @@ -46,7 +46,7 @@ func genStartBackupJobFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { func genStopBackupPolicyFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fStopBackupPolicy, + Name: "stop backup policy", Tasks: []tasktypes.TaskName{tStopBackup}, TargetStatus: string(constants.BackupPolicyStatusStopped), }, @@ -56,7 +56,7 @@ func genStopBackupPolicyFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow func genMaintainRunningPolicyFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainRunningPolicy, + Name: "maintain running policy", Tasks: []tasktypes.TaskName{tConfigureServerForBackup, tCleanOldBackupJobs, tCheckAndSpawnJobs}, TargetStatus: string(constants.BackupPolicyStatusRunning), OnFailure: tasktypes.FailureRule{ @@ -69,7 +69,7 @@ func genMaintainRunningPolicyFlow(_ 
*ObTenantBackupPolicyManager) *tasktypes.Tas func genPauseBackupFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fPauseBackup, + Name: "pause backup", Tasks: []tasktypes.TaskName{tPauseBackup}, TargetStatus: string(constants.BackupPolicyStatusPaused), }, @@ -79,7 +79,7 @@ func genPauseBackupFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { func genResumeBackupFlow(_ *ObTenantBackupPolicyManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fResumeBackup, + Name: "resume backup", Tasks: []tasktypes.TaskName{tResumeBackup}, TargetStatus: string(constants.BackupPolicyStatusRunning), }, diff --git a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go index ab07d9667..113d2a3d0 100644 --- a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go +++ b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go @@ -31,7 +31,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*ObTenantBackupPolicyManager]() diff --git a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_taskname_gen.go b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_taskname_gen.go new file mode 100644 index 000000000..90aa7c9c7 --- /dev/null +++ b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_taskname_gen.go @@ -0,0 +1,14 @@ +// Code generated by go generate; DO NOT EDIT. +package obtenantbackuppolicy + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tConfigureServerForBackup ttypes.TaskName = "configure server for backup" + tStartBackup ttypes.TaskName = "start backup" + tStopBackup ttypes.TaskName = "stop backup" + tCheckAndSpawnJobs ttypes.TaskName = "check and spawn jobs" + tCleanOldBackupJobs ttypes.TaskName = "clean old backup jobs" + tPauseBackup ttypes.TaskName = "pause backup" + tResumeBackup ttypes.TaskName = "resume backup" +) diff --git a/internal/resource/obtenantoperation/names.go b/internal/resource/obtenantoperation/names.go deleted file mode 100644 index 47e4fe1d3..000000000 --- a/internal/resource/obtenantoperation/names.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
-*/ - -package obtenantoperation - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -// tenant operation flows -const ( - fChangeTenantRootPasswordFlow ttypes.FlowName = "change tenant root password" - fActivateStandbyTenantFlow ttypes.FlowName = "activate standby tenant" - fSwitchoverTenantsFlow ttypes.FlowName = "switchover tenants" - fRevertSwitchoverTenantsFlow ttypes.FlowName = "revert switchover tenants" - fOpUpgradeTenant ttypes.FlowName = "upgrade tenant" - fOpReplayLog ttypes.FlowName = "replay log" -) - -const ( - tChangeTenantRootPassword ttypes.TaskName = "change tenant root password" - tActivateStandbyTenant ttypes.TaskName = "activate standby" - tCreateUsersForActivatedStandby ttypes.TaskName = "create users for activated standby" - tSwitchTenantsRole ttypes.TaskName = "switch tenants role" - tSetTenantLogRestoreSource ttypes.TaskName = "set tenant log restore source" - tUpgradeTenant ttypes.TaskName = "upgrade tenant" - tReplayLogOfStandby ttypes.TaskName = "replay log" -) diff --git a/internal/resource/obtenantoperation/obtenantoperation_flow.go b/internal/resource/obtenantoperation/obtenantoperation_flow.go index d27ae9c42..99c04a25f 100644 --- a/internal/resource/obtenantoperation/obtenantoperation_flow.go +++ b/internal/resource/obtenantoperation/obtenantoperation_flow.go @@ -20,7 +20,7 @@ import ( func genChangeTenantRootPasswordFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fChangeTenantRootPasswordFlow, + Name: "change tenant root password", Tasks: []tasktypes.TaskName{ tChangeTenantRootPassword, }, @@ -35,7 +35,7 @@ func genChangeTenantRootPasswordFlow(_ *ObTenantOperationManager) *tasktypes.Tas func genActivateStandbyTenantOpFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fActivateStandbyTenantFlow, + Name: "activate standby tenant", Tasks: []tasktypes.TaskName{ tActivateStandbyTenant, tCreateUsersForActivatedStandby, @@ -51,7 +51,7 @@ func genActivateStandbyTenantOpFlow(_ *ObTenantOperationManager) *tasktypes.Task func genSwitchoverTenantsFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fSwitchoverTenantsFlow, + Name: "switchover tenants", Tasks: []tasktypes.TaskName{ tSwitchTenantsRole, tSetTenantLogRestoreSource, @@ -67,7 +67,7 @@ func genSwitchoverTenantsFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { func genRevertSwitchoverTenantsFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRevertSwitchoverTenantsFlow, + Name: "revert switchover tenants", Tasks: []tasktypes.TaskName{ tSwitchTenantsRole, }, @@ -82,7 +82,7 @@ func genRevertSwitchoverTenantsFlow(_ *ObTenantOperationManager) *tasktypes.Task func genUpgradeTenantFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fOpUpgradeTenant, + Name: "upgrade tenant", Tasks: []tasktypes.TaskName{ tUpgradeTenant, }, @@ -97,7 +97,7 @@ func genUpgradeTenantFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { func genReplayLogOfStandbyFlow(_ *ObTenantOperationManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fOpReplayLog, + Name: "replay log", Tasks: []tasktypes.TaskName{ 
tReplayLogOfStandby, }, diff --git a/internal/resource/obtenantoperation/obtenantoperation_task.go b/internal/resource/obtenantoperation/obtenantoperation_task.go index f89ff3847..95f131134 100644 --- a/internal/resource/obtenantoperation/obtenantoperation_task.go +++ b/internal/resource/obtenantoperation/obtenantoperation_task.go @@ -31,7 +31,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*ObTenantOperationManager]() diff --git a/internal/resource/obtenantoperation/obtenantoperation_taskname_gen.go b/internal/resource/obtenantoperation/obtenantoperation_taskname_gen.go new file mode 100644 index 000000000..ac67c03a2 --- /dev/null +++ b/internal/resource/obtenantoperation/obtenantoperation_taskname_gen.go @@ -0,0 +1,14 @@ +// Code generated by go generate; DO NOT EDIT. +package obtenantoperation + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tChangeTenantRootPassword ttypes.TaskName = "change tenant root password" + tActivateStandbyTenant ttypes.TaskName = "activate standby tenant" + tCreateUsersForActivatedStandby ttypes.TaskName = "create users for activated standby" + tSwitchTenantsRole ttypes.TaskName = "switch tenants role" + tSetTenantLogRestoreSource ttypes.TaskName = "set tenant log restore source" + tUpgradeTenant ttypes.TaskName = "upgrade tenant" + tReplayLogOfStandby ttypes.TaskName = "replay log of standby" +) diff --git a/internal/resource/obtenantrestore/names.go b/internal/resource/obtenantrestore/names.go deleted file mode 100644 index 7ee966a55..000000000 --- a/internal/resource/obtenantrestore/names.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. 
-*/ - -package obtenantrestore - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -const ( - fStartRestoreFlow ttypes.FlowName = "start restore" - fRestoreAsStandbyFlow ttypes.FlowName = "restore as standby" - fRestoreAsPrimaryFlow ttypes.FlowName = "restore as primary" -) - -const ( - tStartRestoreJobInOB ttypes.TaskName = "start restore job" - tStartLogReplay ttypes.TaskName = "start log replay" - tActivateStandby ttypes.TaskName = "activate standby" -) diff --git a/internal/resource/obtenantrestore/obtenantrestore_flow.go b/internal/resource/obtenantrestore/obtenantrestore_flow.go index 454d38069..a9e40507c 100644 --- a/internal/resource/obtenantrestore/obtenantrestore_flow.go +++ b/internal/resource/obtenantrestore/obtenantrestore_flow.go @@ -20,7 +20,7 @@ import ( func genStartRestoreJobFlow(_ *ObTenantRestoreManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fStartRestoreFlow, + Name: "start restore", Tasks: []tasktypes.TaskName{tStartRestoreJobInOB}, TargetStatus: string(constants.RestoreJobRunning), OnFailure: tasktypes.FailureRule{ @@ -33,7 +33,7 @@ func genStartRestoreJobFlow(_ *ObTenantRestoreManager) *tasktypes.TaskFlow { func genRestoreAsPrimaryFlow(_ *ObTenantRestoreManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRestoreAsPrimaryFlow, + Name: "restore as primary", Tasks: []tasktypes.TaskName{tActivateStandby}, TargetStatus: string(constants.RestoreJobSuccessful), OnFailure: tasktypes.FailureRule{ @@ -46,7 +46,7 @@ func genRestoreAsPrimaryFlow(_ *ObTenantRestoreManager) *tasktypes.TaskFlow { func genRestoreAsStandbyFlow(_ *ObTenantRestoreManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRestoreAsStandbyFlow, + Name: "restore as standby", Tasks: []tasktypes.TaskName{tStartLogReplay}, TargetStatus: string(constants.RestoreJobSuccessful), OnFailure: tasktypes.FailureRule{ diff --git a/internal/resource/obtenantrestore/obtenantrestore_task.go b/internal/resource/obtenantrestore/obtenantrestore_task.go index 49cbe5a08..1dc096cba 100644 --- a/internal/resource/obtenantrestore/obtenantrestore_task.go +++ b/internal/resource/obtenantrestore/obtenantrestore_task.go @@ -34,7 +34,7 @@ import ( // OBTenantRestore tasks -//go:generate task-register $GOFILE +//go:generate task_register $GOFILE var taskMap = builder.NewTaskHub[*ObTenantRestoreManager]() diff --git a/internal/resource/obtenantrestore/obtenantrestore_taskname_gen.go b/internal/resource/obtenantrestore/obtenantrestore_taskname_gen.go new file mode 100644 index 000000000..a8da3588b --- /dev/null +++ b/internal/resource/obtenantrestore/obtenantrestore_taskname_gen.go @@ -0,0 +1,10 @@ +// Code generated by go generate; DO NOT EDIT. +package obtenantrestore + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tStartRestoreJobInOB ttypes.TaskName = "start restore job in ob" + tStartLogReplay ttypes.TaskName = "start log replay" + tActivateStandby ttypes.TaskName = "activate standby" +) diff --git a/internal/resource/obzone/names.go b/internal/resource/obzone/names.go deleted file mode 100644 index 7ee2716aa..000000000 --- a/internal/resource/obzone/names.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright (c) 2023 OceanBase -ob-operator is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. 
-You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. -*/ - -package obzone - -import ( - ttypes "github.com/oceanbase/ob-operator/pkg/task/types" -) - -// obzone flows -const ( - fMigrateOBZoneFromExisting ttypes.FlowName = "migrate obzone from existing" - fPrepareOBZoneForBootstrap ttypes.FlowName = "prepare obzone for bootstrap" - fMaintainOBZoneAfterBootstrap ttypes.FlowName = "maintain obzone after bootstrap" - fAddOBServer ttypes.FlowName = "add observer" - fDeleteOBServer ttypes.FlowName = "delete observer" - fUpgradeOBZone ttypes.FlowName = "upgrade obzone" - fForceUpgradeOBZone ttypes.FlowName = "force upgrade obzone" - fCreateOBZone ttypes.FlowName = "create obzone" - fDeleteOBZoneFinalizer ttypes.FlowName = "delete obzone finalizer" - fScaleUpOBServers ttypes.FlowName = "scale up observers" - fExpandPVC ttypes.FlowName = "expand pvc for obzone" - fMountBackupVolume ttypes.FlowName = "mount backup volume for obzone" - fRollingUpdateOBServers ttypes.FlowName = "rolling update observers" -) - -// obzone tasks -const ( - tCreateOBServer ttypes.TaskName = "create observer" - tUpgradeOBServer ttypes.TaskName = "upgrade observer" - tWaitOBServerUpgraded ttypes.TaskName = "wait observer upgraded" - tDeleteLegacyOBServers ttypes.TaskName = "delete legacy observers" - tDeleteOBServer ttypes.TaskName = "delete observer" - tDeleteAllOBServer ttypes.TaskName = "delete all observer" - tAddZone ttypes.TaskName = "add zone" - tStartOBZone ttypes.TaskName = "start obzone" - tWaitOBServerBootstrapReady ttypes.TaskName = "wait observer bootstrap ready" - tWaitOBServerRunning ttypes.TaskName = "wait observer running" - tWaitReplicaMatch ttypes.TaskName = "wait replica match" - tWaitOBServerDeleted ttypes.TaskName = "wait observer deleted" - tStopOBZone ttypes.TaskName = "stop obzone" - tDeleteOBZoneInCluster ttypes.TaskName = "delete obzone in cluster" - tOBClusterHealthCheck ttypes.TaskName = "obcluster health check" - tOBZoneHealthCheck ttypes.TaskName = "obzone health check" - tScaleUpOBServers ttypes.TaskName = "scale up observers" - tWaitForOBServerScalingUp ttypes.TaskName = "wait for observer scaling up" - tExpandPVC ttypes.TaskName = "expand pvc" - tWaitForOBServerExpandingPVC ttypes.TaskName = "wait for observer to expand pvc" - tMountBackupVolume ttypes.TaskName = "mount backup volume" - tWaitForOBServerMounting ttypes.TaskName = "wait for observer to mount backup volume" - tRollingReplaceOBServers ttypes.TaskName = "rolling replace observers" -) diff --git a/internal/resource/obzone/obzone_flow.go b/internal/resource/obzone/obzone_flow.go index e93c3805c..b6a1107b5 100644 --- a/internal/resource/obzone/obzone_flow.go +++ b/internal/resource/obzone/obzone_flow.go @@ -21,7 +21,7 @@ import ( func genMigrateOBZoneFromExistingFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMigrateOBZoneFromExisting, + Name: "migrate obzone from existing", Tasks: []tasktypes.TaskName{tCreateOBServer, tWaitOBServerRunning, tDeleteLegacyOBServers}, TargetStatus: zonestatus.Running, }, @@ -31,7 +31,7 @@ func genMigrateOBZoneFromExistingFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genPrepareOBZoneForBootstrapFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return 
&tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fPrepareOBZoneForBootstrap, + Name: "prepare obzone for bootstrap", Tasks: []tasktypes.TaskName{tCreateOBServer, tWaitOBServerBootstrapReady}, TargetStatus: zonestatus.BootstrapReady, }, @@ -41,7 +41,7 @@ func genPrepareOBZoneForBootstrapFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genMaintainOBZoneAfterBootstrapFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMaintainOBZoneAfterBootstrap, + Name: "maintain obzone after bootstrap", Tasks: []tasktypes.TaskName{tWaitOBServerRunning}, TargetStatus: zonestatus.Running, }, @@ -51,7 +51,7 @@ func genMaintainOBZoneAfterBootstrapFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genCreateOBZoneFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fCreateOBZone, + Name: "create obzone", Tasks: []tasktypes.TaskName{tAddZone, tStartOBZone, tCreateOBServer, tWaitOBServerRunning}, TargetStatus: zonestatus.Running, }, @@ -61,7 +61,7 @@ func genCreateOBZoneFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genAddOBServerFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fAddOBServer, + Name: "add observer", Tasks: []tasktypes.TaskName{tCreateOBServer, tWaitOBServerRunning}, TargetStatus: zonestatus.Running, }, @@ -71,7 +71,7 @@ func genAddOBServerFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genDeleteOBServerFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fDeleteOBServer, + Name: "delete observer", Tasks: []tasktypes.TaskName{tDeleteOBServer, tWaitReplicaMatch}, TargetStatus: zonestatus.Running, OnFailure: tasktypes.FailureRule{ @@ -84,7 +84,7 @@ func genDeleteOBServerFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genDeleteOBZoneFinalizerFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fDeleteOBZoneFinalizer, + Name: "delete obzone finalizer", Tasks: []tasktypes.TaskName{tStopOBZone, tDeleteAllOBServer, tWaitOBServerDeleted, tDeleteOBZoneInCluster}, TargetStatus: zonestatus.FinalizerFinished, }, @@ -94,7 +94,7 @@ func genDeleteOBZoneFinalizerFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genUpgradeOBZoneFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fUpgradeOBZone, + Name: "upgrade obzone", Tasks: []tasktypes.TaskName{tOBClusterHealthCheck, tStopOBZone, tUpgradeOBServer, tWaitOBServerUpgraded, tOBZoneHealthCheck, tStartOBZone}, TargetStatus: zonestatus.Running, }, @@ -104,18 +104,18 @@ func genUpgradeOBZoneFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genForceUpgradeOBZoneFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fForceUpgradeOBZone, + Name: "force upgrade obzone", Tasks: []tasktypes.TaskName{tOBClusterHealthCheck, tUpgradeOBServer, tWaitOBServerUpgraded, tOBZoneHealthCheck}, TargetStatus: zonestatus.Running, }, } } -func genScaleUpOBServersFlow(_ *OBZoneManager) *tasktypes.TaskFlow { +func genScaleOBServersVerticallyFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fScaleUpOBServers, - Tasks: []tasktypes.TaskName{tScaleUpOBServers, 
tWaitForOBServerScalingUp, tWaitOBServerRunning}, + Name: "scale up observers", + Tasks: []tasktypes.TaskName{tScaleOBServersVertically, tWaitForOBServerScalingUp, tWaitOBServerRunning}, TargetStatus: zonestatus.Running, }, } @@ -124,18 +124,18 @@ func genScaleUpOBServersFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genFlowExpandPVC(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fExpandPVC, + Name: "expand pvc", Tasks: []tasktypes.TaskName{tExpandPVC, tWaitForOBServerExpandingPVC, tWaitOBServerRunning}, TargetStatus: zonestatus.Running, }, } } -func genMountBackupVolumeFlow(_ *OBZoneManager) *tasktypes.TaskFlow { +func genModifyServerTemplateFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fMountBackupVolume, - Tasks: []tasktypes.TaskName{tMountBackupVolume, tWaitForOBServerMounting, tWaitOBServerRunning}, + Name: "modify server template", + Tasks: []tasktypes.TaskName{tModifyPodTemplate, tWaitForOBServerTemplateModifying, tWaitOBServerRunning}, TargetStatus: zonestatus.Running, }, } @@ -144,7 +144,7 @@ func genMountBackupVolumeFlow(_ *OBZoneManager) *tasktypes.TaskFlow { func genRollingReplaceServersFlow(_ *OBZoneManager) *tasktypes.TaskFlow { return &tasktypes.TaskFlow{ OperationContext: &tasktypes.OperationContext{ - Name: fRollingUpdateOBServers, + Name: "rolling update observers", Tasks: []tasktypes.TaskName{tRollingReplaceOBServers}, TargetStatus: zonestatus.Running, }, diff --git a/internal/resource/obzone/obzone_manager.go b/internal/resource/obzone/obzone_manager.go index 5a8ce59a0..762bf5795 100644 --- a/internal/resource/obzone/obzone_manager.go +++ b/internal/resource/obzone/obzone_manager.go @@ -109,12 +109,12 @@ func (m *OBZoneManager) GetTaskFlow() (*tasktypes.TaskFlow, error) { taskFlow = genDeleteOBServerFlow(m) case zonestatus.Deleting: taskFlow = genDeleteOBZoneFinalizerFlow(m) - case zonestatus.ScaleUp: - taskFlow = genScaleUpOBServersFlow(m) + case zonestatus.ScaleVertically: + taskFlow = genScaleOBServersVerticallyFlow(m) case zonestatus.ExpandPVC: taskFlow = genFlowExpandPVC(m) - case zonestatus.MountBackupVolume: - taskFlow = genMountBackupVolumeFlow(m) + case zonestatus.ModifyServerTemplate: + taskFlow = genModifyServerTemplateFlow(m) case zonestatus.RollingUpdateServers: taskFlow = genRollingReplaceServersFlow(m) case zonestatus.Upgrade: @@ -223,7 +223,7 @@ func (m *OBZoneManager) UpdateStatus() error { } else { for _, observer := range observerList.Items { if m.OBZone.SupportStaticIP() && m.checkIfCalcResourceChange(&observer) { - m.OBZone.Status.Status = zonestatus.ScaleUp + m.OBZone.Status.Status = zonestatus.ScaleVertically break } if m.checkIfStorageClassChanged(&observer) { @@ -234,8 +234,8 @@ func (m *OBZoneManager) UpdateStatus() error { m.OBZone.Status.Status = zonestatus.ExpandPVC break } - if m.checkIfBackupVolumeAdded(&observer) { - m.OBZone.Status.Status = zonestatus.MountBackupVolume + if m.checkIfBackupVolumeMutated(&observer) || m.checkIfMonitorMutated(&observer) { + m.OBZone.Status.Status = zonestatus.ModifyServerTemplate break } } diff --git a/internal/resource/obzone/obzone_task.go b/internal/resource/obzone/obzone_task.go index c878aa612..d51c9ce18 100644 --- a/internal/resource/obzone/obzone_task.go +++ b/internal/resource/obzone/obzone_task.go @@ -31,7 +31,7 @@ import ( tasktypes "github.com/oceanbase/ob-operator/pkg/task/types" ) -//go:generate task-register $GOFILE +//go:generate 
task_register $GOFILE var taskMap = builder.NewTaskHub[*OBZoneManager]() @@ -244,7 +244,7 @@ func DeleteOBZoneInCluster(m *OBZoneManager) tasktypes.TaskError { return nil } -func ScaleUpOBServers(m *OBZoneManager) tasktypes.TaskError { +func ScaleOBServersVertically(m *OBZoneManager) tasktypes.TaskError { observerList, err := m.listOBServers() if err != nil { return err @@ -293,20 +293,30 @@ func ExpandPVC(m *OBZoneManager) tasktypes.TaskError { return nil } -func MountBackupVolume(m *OBZoneManager) tasktypes.TaskError { +func ModifyPodTemplate(m *OBZoneManager) tasktypes.TaskError { observerList, err := m.listOBServers() if err != nil { return err } for _, observer := range observerList.Items { if observer.Spec.BackupVolume == nil && m.OBZone.Spec.BackupVolume != nil { - m.Logger.Info("Mount backup volume", "observer", observer.Name) + m.Logger.Info("Add backup volume", "observer", observer.Name) err = retry.RetryOnConflict(retry.DefaultRetry, func() error { observer.Spec.BackupVolume = m.OBZone.Spec.BackupVolume return m.Client.Update(m.Ctx, &observer) }) if err != nil { - return errors.Wrapf(err, "Mount backup volume %s failed", observer.Name) + return errors.Wrapf(err, "Add backup volume %s failed", observer.Name) + } + } + if observer.Spec.MonitorTemplate == nil && m.OBZone.Spec.MonitorTemplate != nil { + m.Logger.Info("Add monitor template", "observer", observer.Name) + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + observer.Spec.MonitorTemplate = m.OBZone.Spec.MonitorTemplate + return m.Client.Update(m.Ctx, &observer) + }) + if err != nil { + return errors.Wrapf(err, "Add monitor template %s failed", observer.Name) } } } @@ -359,15 +369,15 @@ func WaitOBServerRunning(m *OBZoneManager) tasktypes.TaskError { } func WaitForOBServerScalingUp(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.ScaleUp, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() + return m.generateWaitOBServerStatusFunc(serverstatus.ScaleVertically, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func WaitForOBServerExpandingPVC(m *OBZoneManager) tasktypes.TaskError { return m.generateWaitOBServerStatusFunc(serverstatus.ExpandPVC, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } -func WaitForOBServerMounting(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.MountBackupVolume, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() +func WaitForOBServerTemplateModifying(m *OBZoneManager) tasktypes.TaskError { + return m.generateWaitOBServerStatusFunc(serverstatus.ModifyingPodTemplate, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func RollingReplaceOBServers(m *OBZoneManager) tasktypes.TaskError { diff --git a/internal/resource/obzone/obzone_task_gen.go b/internal/resource/obzone/obzone_task_gen.go index 3c16c9f1a..34aa361e9 100644 --- a/internal/resource/obzone/obzone_task_gen.go +++ b/internal/resource/obzone/obzone_task_gen.go @@ -15,14 +15,14 @@ func init() { taskMap.Register(tUpgradeOBServer, UpgradeOBServer) taskMap.Register(tWaitOBServerUpgraded, WaitOBServerUpgraded) taskMap.Register(tDeleteOBZoneInCluster, DeleteOBZoneInCluster) - taskMap.Register(tScaleUpOBServers, ScaleUpOBServers) + taskMap.Register(tScaleOBServersVertically, ScaleOBServersVertically) taskMap.Register(tExpandPVC, ExpandPVC) - taskMap.Register(tMountBackupVolume, MountBackupVolume) + taskMap.Register(tModifyPodTemplate, ModifyPodTemplate) taskMap.Register(tDeleteLegacyOBServers, DeleteLegacyOBServers) 
taskMap.Register(tWaitOBServerBootstrapReady, WaitOBServerBootstrapReady) taskMap.Register(tWaitOBServerRunning, WaitOBServerRunning) taskMap.Register(tWaitForOBServerScalingUp, WaitForOBServerScalingUp) taskMap.Register(tWaitForOBServerExpandingPVC, WaitForOBServerExpandingPVC) - taskMap.Register(tWaitForOBServerMounting, WaitForOBServerMounting) + taskMap.Register(tWaitForOBServerTemplateModifying, WaitForOBServerTemplateModifying) taskMap.Register(tRollingReplaceOBServers, RollingReplaceOBServers) } diff --git a/internal/resource/obzone/obzone_taskname_gen.go b/internal/resource/obzone/obzone_taskname_gen.go new file mode 100644 index 000000000..3bbf07ff0 --- /dev/null +++ b/internal/resource/obzone/obzone_taskname_gen.go @@ -0,0 +1,30 @@ +// Code generated by go generate; DO NOT EDIT. +package obzone + +import ttypes "github.com/oceanbase/ob-operator/pkg/task/types" + +const ( + tAddZone ttypes.TaskName = "add zone" + tStartOBZone ttypes.TaskName = "start obzone" + tCreateOBServer ttypes.TaskName = "create observer" + tDeleteOBServer ttypes.TaskName = "delete observer" + tDeleteAllOBServer ttypes.TaskName = "delete all observer" + tWaitReplicaMatch ttypes.TaskName = "wait replica match" + tWaitOBServerDeleted ttypes.TaskName = "wait observer deleted" + tStopOBZone ttypes.TaskName = "stop obzone" + tOBClusterHealthCheck ttypes.TaskName = "obcluster health check" + tOBZoneHealthCheck ttypes.TaskName = "obzone health check" + tUpgradeOBServer ttypes.TaskName = "upgrade observer" + tWaitOBServerUpgraded ttypes.TaskName = "wait observer upgraded" + tDeleteOBZoneInCluster ttypes.TaskName = "delete obzone in cluster" + tScaleOBServersVertically ttypes.TaskName = "scale observers vertically" + tExpandPVC ttypes.TaskName = "expand pvc" + tModifyPodTemplate ttypes.TaskName = "modify pod template" + tDeleteLegacyOBServers ttypes.TaskName = "delete legacy observers" + tWaitOBServerBootstrapReady ttypes.TaskName = "wait observer bootstrap ready" + tWaitOBServerRunning ttypes.TaskName = "wait observer running" + tWaitForOBServerScalingUp ttypes.TaskName = "wait for observer scaling up" + tWaitForOBServerExpandingPVC ttypes.TaskName = "wait for observer expanding pvc" + tWaitForOBServerTemplateModifying ttypes.TaskName = "wait for observer template modifying" + tRollingReplaceOBServers ttypes.TaskName = "rolling replace observers" +) diff --git a/internal/resource/obzone/utils.go b/internal/resource/obzone/utils.go index 1c43f4bad..015ff8819 100644 --- a/internal/resource/obzone/utils.go +++ b/internal/resource/obzone/utils.go @@ -60,8 +60,12 @@ func (m *OBZoneManager) checkIfCalcResourceChange(observer *v1alpha1.OBServer) b observer.Spec.OBServerTemplate.Resource.Memory.Cmp(m.OBZone.Spec.OBServerTemplate.Resource.Memory) != 0 } -func (m *OBZoneManager) checkIfBackupVolumeAdded(observer *v1alpha1.OBServer) bool { - return observer.Spec.BackupVolume == nil && m.OBZone.Spec.BackupVolume != nil +func (m *OBZoneManager) checkIfBackupVolumeMutated(observer *v1alpha1.OBServer) bool { + return (observer.Spec.BackupVolume == nil) != (m.OBZone.Spec.BackupVolume == nil) +} + +func (m *OBZoneManager) checkIfMonitorMutated(observer *v1alpha1.OBServer) bool { + return (observer.Spec.MonitorTemplate == nil) != (m.OBZone.Spec.MonitorTemplate == nil) } func (m *OBZoneManager) retryUpdateStatus() error { diff --git a/make/deps.mk b/make/deps.mk index 2076339d6..09d0f799c 100644 --- a/make/deps.mk +++ b/make/deps.mk @@ -43,4 +43,4 @@ tools: kustomize controller-gen envtest install-delve ## Download all tools .PHONY: 
init-generator init-generator: ## Install generator tools - go install cmd/generator/task/task-register.go + go install internal/cmds/generator/task/task_register.go diff --git a/make/development.mk b/make/development.mk index 8c4cd62a7..862a0c039 100644 --- a/make/development.mk +++ b/make/development.mk @@ -77,11 +77,11 @@ commit-hook: $(GOLANGCI_LINT) ## Install commit hook. echo "make export-operator export-charts" >> .git/hooks/pre-commit .PHONY: run-delve -run-delve: generate fmt vet manifests ## Run with Delve for development purposes against the configured Kubernetes cluster in ~/.kube/config +run-delve: fmt vet manifests ## Run with Delve for development purposes against the configured Kubernetes cluster in ~/.kube/config go build -gcflags "all=-trimpath=$(shell go env GOPATH)" -o bin/manager cmd/operator/main.go - DISABLE_WEBHOOKS=true DISABLE_TELEMETRY=true dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./bin/manager --continue -- -log-verbosity=${LOG_LEVEL} + DISABLE_WEBHOOKS=true dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./bin/manager --continue -- --log-verbosity=${LOG_LEVEL} .PHONY: run-local -run-local: manifests generate fmt vet ## Run a controller on your local host, with configurations in ~/.kube/config +run-local: manifests fmt vet ## Run a controller on your local host, with configurations in ~/.kube/config @mkdir -p testreports/covdata - CGO_ENABLED=1 GOCOVERDIR=testreports/covdata DISABLE_WEBHOOKS=true DISABLE_TELEMETRY=true go run -cover -covermode=atomic ./cmd/operator/main.go --log-verbosity=${LOG_LEVEL} + CGO_ENABLED=1 GOCOVERDIR=testreports/covdata DISABLE_WEBHOOKS=true go run -cover -covermode=atomic ./cmd/operator/main.go --log-verbosity=${LOG_LEVEL} diff --git a/scripts/connect.sh b/scripts/connect.sh index 2a70d5c3a..115c6834e 100644 --- a/scripts/connect.sh +++ b/scripts/connect.sh @@ -23,6 +23,7 @@ function print_help { echo " -t OBTenant of the OBCluster. If not specified, the script will connect to the sys tenant." echo " -u, --user User of the tenant. Default is root." echo " -p, --password Password of the user. If the user is not root, the password is required." + echo " --show-password Show the password in the output." echo " --proxy Connect to the obproxy deployment with default name." echo " --proxy-name Connect to the obproxy deployment with the specified name. (--deploy-name in setup-obproxy.sh)" } @@ -109,6 +110,9 @@ while [[ $# -gt 0 ]]; do PROXY_DEPLOY_NAME=$2 shift ;; + --show-password) + SHOW_PASSWORD=true + ;; *) break ;; @@ -164,6 +168,11 @@ fi ROOT_SECRET=$(kubectl get obcluster $OB_CLUSTER -n $NAMESPACE -o jsonpath='{.spec.userSecrets.root}') export ROOT_PWD=$(kubectl get secret $ROOT_SECRET -n $NAMESPACE -o jsonpath='{.data.password}' | base64 -d) +if [[ $SHOW_PASSWORD == true ]]; then + echo "Root Password: $ROOT_PWD" + exit 0 +fi + echo "Connecting to OBCluster \"$OB_CLUSTER\" in namespace \"$NAMESPACE\"..." echo "Host IP: $CONNECTING_HOST" if [[ -n $OB_TENANT ]]; then