diff --git a/api/constants/backup.go b/api/constants/backup.go
index 70fb4152b..9a23bfaee 100644
--- a/api/constants/backup.go
+++ b/api/constants/backup.go
@@ -12,7 +12,11 @@ See the Mulan PSL v2 for more details.
 
 package constants
 
-import "github.com/oceanbase/ob-operator/api/types"
+import (
+    "regexp"
+
+    "github.com/oceanbase/ob-operator/api/types"
+)
 
 const (
     BackupJobTypeFull types.BackupJobType = "FULL"
@@ -45,8 +49,11 @@ const (
 )
 
 const (
-    BackupDestTypeOSS types.BackupDestType = "OSS"
-    BackupDestTypeNFS types.BackupDestType = "NFS"
+    BackupDestTypeOSS          types.BackupDestType = "OSS"
+    BackupDestTypeNFS          types.BackupDestType = "NFS"
+    BackupDestTypeCOS          types.BackupDestType = "COS"
+    BackupDestTypeS3           types.BackupDestType = "S3"
+    BackupDestTypeS3Compatible types.BackupDestType = "S3_COMPATIBLE"
 )
 
 const (
@@ -58,3 +65,11 @@ const (
     ArchiveBindingOptional  types.ArchiveBinding = "Optional"
     ArchiveBindingMandatory types.ArchiveBinding = "Mandatory"
 )
+
+var DestPathPatternMapping = map[types.BackupDestType]*regexp.Regexp{
+    BackupDestTypeOSS:          regexp.MustCompile(`^oss://[^/]+/[^/].*\\?host=.+$`),
+    BackupDestTypeCOS:          regexp.MustCompile(`^cos://[^/]+/[^/].*\\?host=.+$`),
+    BackupDestTypeS3:           regexp.MustCompile(`^s3://[^/]+/[^/].*\\?host=.+$`),
+    BackupDestTypeS3Compatible: regexp.MustCompile(`^s3://[^/]+/[^/].*\\?host=.+$`),
+    BackupDestTypeNFS:          regexp.MustCompile(`^\S+$`),
+}
diff --git a/api/v1alpha1/obtenantbackuppolicy_webhook.go b/api/v1alpha1/obtenantbackuppolicy_webhook.go
index c63a361f0..550838bc9 100644
--- a/api/v1alpha1/obtenantbackuppolicy_webhook.go
+++ b/api/v1alpha1/obtenantbackuppolicy_webhook.go
@@ -81,10 +81,10 @@ func (r *OBTenantBackupPolicy) Default() {
     // only "default" is permitted
     r.Spec.DataClean.Name = "default"
 
-    if r.Spec.DataBackup.Destination.Type == constants.BackupDestTypeOSS {
+    if r.Spec.DataBackup.Destination.Type != constants.BackupDestTypeNFS {
         r.Spec.DataBackup.Destination.Path = strings.ReplaceAll(r.Spec.DataBackup.Destination.Path, "/?", "?")
     }
-    if r.Spec.LogArchive.Destination.Type == constants.BackupDestTypeOSS {
+    if r.Spec.LogArchive.Destination.Type != constants.BackupDestTypeNFS {
         r.Spec.LogArchive.Destination.Path = strings.ReplaceAll(r.Spec.LogArchive.Destination.Path, "/?", "?")
     }
     if r.Labels == nil {
@@ -221,8 +221,6 @@ func (r *OBTenantBackupPolicy) validateBackupPolicy() error {
         return err
     }
 
-    ossPathPattern := regexp.MustCompile("^oss://[^/]+/[^/].*\\?host=.+$")
-
     if r.Spec.DataBackup.EncryptionSecret != "" {
         sec := &v1.Secret{}
         err := bakClt.Get(context.Background(), types.NamespacedName{
@@ -241,105 +239,12 @@ func (r *OBTenantBackupPolicy) validateBackupPolicy() error {
         return field.Invalid(field.NewPath("spec").Child("logArchive").Child("binding"), r.Spec.LogArchive.Binding, "invalid binding, only optional and mandatory are supported")
     }
 
-    // Check types of destinations are legal
-    if r.Spec.LogArchive.Destination.Type != constants.BackupDestTypeNFS && r.Spec.LogArchive.Destination.Type != constants.BackupDestTypeOSS {
-        return field.Invalid(field.NewPath("spec").Child("logArchive").Child("destination").Child("type"), r.Spec.LogArchive.Destination.Type, "invalid destination type, only NFS and OSS are supported")
-    }
-    if r.Spec.DataBackup.Destination.Type != constants.BackupDestTypeNFS && r.Spec.DataBackup.Destination.Type != constants.BackupDestTypeOSS {
-        return field.Invalid(field.NewPath("spec").Child("dataBackup").Child("destination").Child("type"), r.Spec.DataBackup.Destination.Type, "invalid destination type, only NFS and OSS are supported")
-    }
-
-    if r.Spec.DataBackup.Destination.Type == constants.BackupDestTypeNFS ||
-        r.Spec.LogArchive.Destination.Type == constants.BackupDestTypeNFS {
-        if cluster.Spec.BackupVolume == nil {
-            return field.Invalid(field.NewPath("spec").Child("clusterName"), r.Spec.ObClusterName, "backupVolume of obcluster is required when using NFS")
-        }
-    }
-
-    // Check oss access of destinations
-    if r.Spec.DataBackup.Destination.Type == constants.BackupDestTypeOSS && r.Spec.DataBackup.Destination.OSSAccessSecret != "" {
-        if !ossPathPattern.MatchString(r.Spec.DataBackup.Destination.Path) {
-            return field.Invalid(field.NewPath("spec").Child("dataBackup").Child("destination").Child("path"), r.Spec.DataBackup.Destination.Path, "invalid path, pattern: ^oss://[^/]+/[^/].*\\?host=.+$")
-        }
-
-        secret := &v1.Secret{}
-        err := bakClt.Get(context.Background(), types.NamespacedName{
-            Namespace: r.GetNamespace(),
-            Name:      r.Spec.DataBackup.Destination.OSSAccessSecret,
-        }, secret)
-        if err != nil {
-            if apierrors.IsNotFound(err) {
-                return field.Invalid(
-                    field.NewPath("spec").Child("dataBackup").Child("destination").Child("ossAccessSecret"),
-                    r.Spec.DataBackup.Destination.OSSAccessSecret,
-                    "Given OSSAccessSecret not found",
-                )
-            }
-            return err
-        }
-        var allErrs field.ErrorList
-
-        if _, ok := secret.Data["accessId"]; !ok {
-            allErrs = append(allErrs, field.Invalid(
-                field.NewPath("spec").Child("dataBackup").Child("destination").Child("ossAccessSecret"),
-                r.Spec.DataBackup.Destination.OSSAccessSecret,
-                "accessId field not found in given OSSAccessSecret",
-            ))
-        }
-        if _, ok := secret.Data["accessKey"]; !ok {
-            allErrs = append(allErrs, field.Invalid(
-                field.NewPath("spec").Child("dataBackup").Child("destination").Child("ossAccessSecret"),
-                r.Spec.DataBackup.Destination.OSSAccessSecret,
-                "accessKey field not found in given OSSAccessSecret",
-            ))
-        }
-        if len(allErrs) != 0 {
-            return allErrs.ToAggregate()
-        }
-    }
-
-    if r.Spec.LogArchive.Destination.Type == constants.BackupDestTypeOSS && r.Spec.LogArchive.Destination.OSSAccessSecret != "" {
-        if !ossPathPattern.MatchString(r.Spec.LogArchive.Destination.Path) {
-            return field.Invalid(field.NewPath("spec").Child("logArchive").Child("destination").Child("path"), r.Spec.LogArchive.Destination.Path, "invalid path, pattern: ^oss://[^/]+/[^/].*\\?host=.+$")
-        }
-
-        secret := &v1.Secret{}
-        err := bakClt.Get(context.Background(), types.NamespacedName{
-            Namespace: r.GetNamespace(),
-            Name:      r.Spec.LogArchive.Destination.OSSAccessSecret,
-        }, secret)
-        if err != nil {
-            if apierrors.IsNotFound(err) {
-                return field.Invalid(
-                    field.NewPath("spec").Child("logArchive").Child("destination").Child("ossAccessSecret"),
-                    r.Spec.LogArchive.Destination.OSSAccessSecret,
-                    "Given OSSAccessSecret not found",
-                )
-            }
-            return err
-        }
-
-        var allErrs field.ErrorList
-        if _, ok := secret.Data["accessId"]; !ok {
-            allErrs = append(allErrs, field.Invalid(
-                field.NewPath("spec").Child("logArchive").Child("destination").Child("ossAccessSecret"),
-                r.Spec.LogArchive.Destination.OSSAccessSecret,
-                "accessId field not found in given OSSAccessSecret",
-            ))
-        }
-        if _, ok := secret.Data["accessKey"]; !ok {
-            allErrs = append(allErrs, field.Invalid(
-                field.NewPath("spec").Child("logArchive").Child("destination").Child("ossAccessSecret"),
-                r.Spec.LogArchive.Destination.OSSAccessSecret,
-                "accessKey field not found in given OSSAccessSecret",
-            ))
-        }
-        if len(allErrs) != 0 {
-            return allErrs.ToAggregate()
-        }
-    }
+    destErrs := errors.Join(
+        r.validateDestination(cluster, &r.Spec.DataBackup.Destination, "dataBackup"),
+        r.validateDestination(cluster, &r.Spec.LogArchive.Destination, "logArchive"),
+    )
 
-    return nil
+    return destErrs
 }
 
 func (r *OBTenantBackupPolicy) validateInterval() error {
@@ -387,3 +292,57 @@ func validateScheduleFormat(schedule string, fldPath *field.Path) *field.Error {
     }
     return nil
 }
+
+func (r *OBTenantBackupPolicy) validateDestination(cluster *OBCluster, dest *apitypes.BackupDestination, fieldName string) *field.Error {
+    if dest.Type == constants.BackupDestTypeNFS && cluster.Spec.BackupVolume == nil {
+        return field.Invalid(field.NewPath("spec").Child("backupVolume"), cluster.Spec.BackupVolume, "backupVolume of obcluster is required when backing up data to NFS")
+    }
+    pattern, ok := constants.DestPathPatternMapping[dest.Type]
+    if !ok {
+        return field.Invalid(field.NewPath("spec").Child(fieldName).Child("destination"), dest.Type, "invalid backup destination type")
+    }
+    if !pattern.MatchString(dest.Path) {
+        return field.Invalid(field.NewPath("spec").Child(fieldName).Child("destination"), dest.Path, "invalid backup destination path, the path format should be "+pattern.String())
+    }
+    if dest.Type != constants.BackupDestTypeNFS && dest.OSSAccessSecret == "" {
+        return field.Invalid(field.NewPath("spec").Child(fieldName).Child("destination"), dest.OSSAccessSecret, "OSSAccessSecret is required when backing up data to OSS, COS or S3")
+    }
+    secret := &v1.Secret{}
+    err := bakClt.Get(context.Background(), types.NamespacedName{
+        Namespace: r.GetNamespace(),
+        Name:      dest.OSSAccessSecret,
+    }, secret)
+    fieldPath := field.NewPath("spec").Child(fieldName).Child("destination").Child("ossAccessSecret")
+    if err != nil {
+        if apierrors.IsNotFound(err) {
+            return field.Invalid(fieldPath, dest.OSSAccessSecret, "Given OSSAccessSecret not found")
+        }
+        return field.InternalError(fieldPath, err)
+    }
+    // All the following types need accessId and accessKey
+    switch dest.Type {
+    case
+        constants.BackupDestTypeCOS,
+        constants.BackupDestTypeOSS,
+        constants.BackupDestTypeS3,
+        constants.BackupDestTypeS3Compatible:
+        if _, ok := secret.Data["accessId"]; !ok {
+            return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessId field not found in given OSSAccessSecret")
+        }
+        if _, ok := secret.Data["accessKey"]; !ok {
+            return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessKey field not found in given OSSAccessSecret")
+        }
+    }
+    // The following types need additional fields
+    switch dest.Type {
+    case constants.BackupDestTypeCOS:
+        if _, ok := secret.Data["appId"]; !ok {
+            return field.Invalid(fieldPath, dest.OSSAccessSecret, "appId field not found in given OSSAccessSecret")
+        }
+    case constants.BackupDestTypeS3:
+        if _, ok := secret.Data["s3Region"]; !ok {
+            return field.Invalid(fieldPath, dest.OSSAccessSecret, "s3Region field not found in given OSSAccessSecret")
+        }
+    }
+    return nil
+}
diff --git a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_manager.go b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_manager.go
index 7d53fc28a..77df6d105 100644
--- a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_manager.go
+++ b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_manager.go
@@ -190,10 +190,10 @@ func (m *ObTenantBackupPolicyManager) UpdateStatus() error {
             return err
         }
         var backupPath string
-        if m.BackupPolicy.Spec.DataBackup.Destination.Type == constants.BackupDestTypeOSS {
+        if m.BackupPolicy.Spec.DataBackup.Destination.Type != constants.BackupDestTypeNFS {
            backupPath = m.BackupPolicy.Spec.DataBackup.Destination.Path
         } else {
-            backupPath = m.getBackupDestPath()
+            backupPath = m.getDestPath(m.BackupPolicy.Spec.DataBackup.Destination)
         }
 
         latestFull, err := m.getLatestBackupJobOfTypeAndPath(constants.BackupJobTypeFull, backupPath)
diff --git a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go
index 113d2a3d0..0960724ae 100644
--- a/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go
+++ b/internal/resource/obtenantbackuppolicy/obtenantbackuppolicy_task.go
@@ -79,7 +79,7 @@ func ConfigureServerForBackup(m *ObTenantBackupPolicyManager) tasktypes.TaskErro
         }
     } else {
         archiveSpec := m.BackupPolicy.Spec.LogArchive
-        archivePath := m.getArchiveDestPath()
+        archivePath := m.getDestPath(archiveSpec.Destination)
         for _, config := range configs {
             switch {
             case config.Name == "path" && config.Value != archivePath:
@@ -108,7 +108,7 @@ func ConfigureServerForBackup(m *ObTenantBackupPolicyManager) tasktypes.TaskErro
             return err
         }
         if latestRunning == nil {
-            err = con.SetDataBackupDestForTenant(m.Ctx, m.getBackupDestPath())
+            err = con.SetDataBackupDestForTenant(m.Ctx, m.getDestPath(m.BackupPolicy.Spec.DataBackup.Destination))
             if err != nil {
                 return err
             }
@@ -121,7 +121,7 @@ func ConfigureServerForBackup(m *ObTenantBackupPolicyManager) tasktypes.TaskErro
     if err != nil {
         return err
     }
-    backupPath := m.getBackupDestPath()
+    backupPath := m.getDestPath(m.BackupPolicy.Spec.DataBackup.Destination)
     if len(backupConfigs) == 0 {
         err = setBackupDest()
         if err != nil {
@@ -218,10 +218,10 @@ func StopBackup(m *ObTenantBackupPolicyManager) tasktypes.TaskError {
 
 func CheckAndSpawnJobs(m *ObTenantBackupPolicyManager) tasktypes.TaskError {
     var backupPath string
-    if m.BackupPolicy.Spec.DataBackup.Destination.Type == constants.BackupDestTypeOSS {
+    if m.BackupPolicy.Spec.DataBackup.Destination.Type != constants.BackupDestTypeNFS {
        backupPath = m.BackupPolicy.Spec.DataBackup.Destination.Path
     } else {
-        backupPath = m.getBackupDestPath()
+        backupPath = m.getDestPath(m.BackupPolicy.Spec.DataBackup.Destination)
     }
     // Avoid backup failure due to destination modification
     latestFull, err := m.getLatestBackupJobOfTypeAndPath(constants.BackupJobTypeFull, backupPath)
diff --git a/internal/resource/obtenantbackuppolicy/utils.go b/internal/resource/obtenantbackuppolicy/utils.go
index 67312538f..5779ceb67 100644
--- a/internal/resource/obtenantbackuppolicy/utils.go
+++ b/internal/resource/obtenantbackuppolicy/utils.go
@@ -102,27 +102,8 @@ func (m *ObTenantBackupPolicyManager) getOperationManager() (*operation.Oceanbas
     return con, nil
 }
 
-func (m *ObTenantBackupPolicyManager) getArchiveDestPath() string {
-    targetDest := m.BackupPolicy.Spec.LogArchive.Destination
-    if targetDest.Type == constants.BackupDestTypeNFS || resourceutils.IsZero(targetDest.Type) {
-        return "file://" + path.Join(oceanbaseconst.BackupPath, targetDest.Path)
-    } else if targetDest.Type == constants.BackupDestTypeOSS && targetDest.OSSAccessSecret != "" {
-        secret := &v1.Secret{}
-        err := m.Client.Get(m.Ctx, types.NamespacedName{
-            Namespace: m.BackupPolicy.GetNamespace(),
-            Name:      targetDest.OSSAccessSecret,
-        }, secret)
-        if err != nil {
-            m.PrintErrEvent(err)
-            return ""
-        }
-        return strings.Join([]string{targetDest.Path, "access_id=" + string(secret.Data["accessId"]), "access_key=" + string(secret.Data["accessKey"])}, "&")
-    }
-    return targetDest.Path
-}
-
 func (m *ObTenantBackupPolicyManager) getArchiveDestSettingValue() string {
-    path := m.getArchiveDestPath()
+    path := m.getDestPath(m.BackupPolicy.Spec.LogArchive.Destination)
     archiveSpec := m.BackupPolicy.Spec.LogArchive
     if archiveSpec.SwitchPieceInterval != "" {
         path += fmt.Sprintf(" PIECE_SWITCH_INTERVAL=%s", archiveSpec.SwitchPieceInterval)
@@ -133,23 +114,29 @@ func (m *ObTenantBackupPolicyManager) getArchiveDestSettingValue() string {
     return "LOCATION=" + path
 }
 
-func (m *ObTenantBackupPolicyManager) getBackupDestPath() string {
-    targetDest := m.BackupPolicy.Spec.DataBackup.Destination
-    if targetDest.Type == constants.BackupDestTypeNFS || resourceutils.IsZero(targetDest.Type) {
-        return "file://" + path.Join(oceanbaseconst.BackupPath, targetDest.Path)
-    } else if targetDest.Type == constants.BackupDestTypeOSS && targetDest.OSSAccessSecret != "" {
-        secret := &v1.Secret{}
-        err := m.Client.Get(m.Ctx, types.NamespacedName{
-            Namespace: m.BackupPolicy.GetNamespace(),
-            Name:      targetDest.OSSAccessSecret,
-        }, secret)
-        if err != nil {
-            m.PrintErrEvent(err)
-            return ""
-        }
-        return strings.Join([]string{targetDest.Path, "access_id=" + string(secret.Data["accessId"]), "access_key=" + string(secret.Data["accessKey"])}, "&")
+func (m *ObTenantBackupPolicyManager) getDestPath(dest apitypes.BackupDestination) string {
+    if dest.Type == constants.BackupDestTypeNFS || resourceutils.IsZero(dest.Type) {
+        return "file://" + path.Join(oceanbaseconst.BackupPath, dest.Path)
+    }
+    if dest.OSSAccessSecret == "" {
+        return ""
+    }
+    secret := &v1.Secret{}
+    err := m.Client.Get(m.Ctx, types.NamespacedName{
+        Namespace: m.BackupPolicy.GetNamespace(),
+        Name:      dest.OSSAccessSecret,
+    }, secret)
+    if err != nil {
+        m.PrintErrEvent(err)
+        return ""
+    }
+    destPath := strings.Join([]string{dest.Path, "access_id=" + string(secret.Data["accessId"]), "access_key=" + string(secret.Data["accessKey"])}, "&")
+    if dest.Type == constants.BackupDestTypeCOS {
+        destPath += ("&appid=" + string(secret.Data["appId"]))
+    } else if dest.Type == constants.BackupDestTypeS3 {
+        destPath += ("&s3_region=" + string(secret.Data["s3Region"]))
     }
-    return targetDest.Path
+    return destPath
 }
 
 func (m *ObTenantBackupPolicyManager) createBackupJob(jobType apitypes.BackupJobType) error {
@@ -160,10 +147,10 @@ func (m *ObTenantBackupPolicyManager) createBackupJob(jobType apitypes.BackupJob
     case constants.BackupJobTypeIncr:
         fallthrough
     case constants.BackupJobTypeFull:
-        path = m.getBackupDestPath()
+        path = m.getDestPath(m.BackupPolicy.Spec.DataBackup.Destination)
     case constants.BackupJobTypeArchive:
-        path = m.getArchiveDestPath()
+        path = m.getDestPath(m.BackupPolicy.Spec.LogArchive.Destination)
     }
 
     var tenantRecordName string
     var tenantSecret string
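
Reviewer note: the snippet below is a minimal sketch, not part of the patch, showing how the new constants.DestPathPatternMapping behaves for the supported destination types. The bucket names, hosts, and the NFS sub-path are made-up placeholders; only the constants and the pattern map come from this change.

package main

import (
	"fmt"

	"github.com/oceanbase/ob-operator/api/constants"
	"github.com/oceanbase/ob-operator/api/types"
)

func main() {
	// Sample destination paths per type; bucket names and hosts are placeholders.
	samples := map[types.BackupDestType]string{
		constants.BackupDestTypeOSS: "oss://bucket/backup?host=oss-cn-hangzhou.aliyuncs.com",
		constants.BackupDestTypeCOS: "cos://bucket-1250000000/backup?host=cos.ap-nanjing.myqcloud.com",
		constants.BackupDestTypeS3:  "s3://bucket/backup?host=s3.us-east-1.amazonaws.com",
		constants.BackupDestTypeNFS: "tenant-a/backup",
	}
	// Each sample above matches its pattern, so it would pass the webhook's path check.
	for destType, destPath := range samples {
		pattern := constants.DestPathPatternMapping[destType]
		fmt.Printf("%-4s %-65s matches=%v\n", destType, destPath, pattern.MatchString(destPath))
	}

	// An object-storage path that omits the host parameter is rejected (prints false).
	bad := "oss://bucket/backup"
	fmt.Println("without host:", constants.DestPathPatternMapping[constants.BackupDestTypeOSS].MatchString(bad))
}

As the refactored getDestPath in utils.go shows, the credential suffix built from the referenced secret additionally gains "&appid=..." for COS and "&s3_region=..." for S3, which is why validateDestination now requires the appId and s3Region keys in the OSSAccessSecret for those types.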