diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml new file mode 100644 index 000000000..e756d0ae0 --- /dev/null +++ b/.github/workflows/ci-tests.yml @@ -0,0 +1,41 @@ +name: run tests + +on: + pull_request: + branches: + - master + - "*_release" + paths: + - '**/*.go' + push: + branches: + - master + - "*_release" + paths: + - '**/*.go' + +env: + GO_VERSION: "1.22" + +jobs: + run-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Set up Go 1.x + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + + - name: go mod vendor + run: go mod vendor + + - name: install tools + run: make tools + + - name: test webhooks + run: make test-webhooks + \ No newline at end of file diff --git a/Makefile b/Makefile index 3fe1412cb..7689a202b 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ include make/* -VERSION ?= 2.2.2 +VERSION ?= 2.3.0 # Image URL to use all building/pushing image targets IMG ?= quay.io/oceanbase/ob-operator:${VERSION} # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. diff --git a/README-CN.md b/README-CN.md index 139b5a56d..6c6793d9e 100644 --- a/README-CN.md +++ b/README-CN.md @@ -15,7 +15,7 @@ ob-operator 是满足 Kubernetes Operator 扩展范式的自动化工具,可 ob-operator 依赖 [cert-manager](https://cert-manager.io/docs/), cert-manager 的安装可以参考对应的[安装文档](https://cert-manager.io/docs/installation/),如果您无法访问官方制品托管在 `quay.io` 镜像站的镜像,可通过下面的指令安装我们转托在 `docker.io` 中的制品: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` 本例子中的 OceanBase 集群存储依赖 [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 提供, 需要提前进行安装并确保其存储目的地有足够大的磁盘空间。如果您计划在生产环境部署,推荐使用其他的存储解决方案。我们在[存储兼容性](#存储兼容性)一节提供了我们测试过的存储兼容性结果。 @@ -29,7 +29,7 @@ kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_r - 稳定版本 ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - 开发版本 @@ -45,7 +45,7 @@ Helm Chart 将 ob-operator 部署的命名空间进行了参数化,可在安 ```shell helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update -helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.2.2 +helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.3.0 ``` #### 使用 terraform @@ -97,7 +97,7 @@ kubectl create secret generic root-password --from-literal=password='root_passwo 通过以下命令即可在 K8s 集群中部署 OceanBase: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/quickstart/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/quickstart/obcluster.yaml ``` 一般初始化集群需要 2 分钟左右的时间,执行以下命令,查询集群状态,当集群状态变成 running 之后表示集群创建和初始化成功: diff --git a/README.md b/README.md index 9c30bd067..c10a30585 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ ob-operator relies on [cert-manager](https://cert-manager.io/docs/) for certific If you have trouble accessing `quay.io` image registry, our mirrored cert-manager manifests can be applied through following command: ```shell -kubectl apply -f 
https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` Storage of OceanBase cluster in this example relies on [local-path-provisioner](https://github.com/rancher/local-path-provisioner), which should be installed beforehand. You should confirm that there is enough disk space in storage destination of local-path-provisioner. If you decide to deploy OceanBase cluster in production environment, it is recommended to use other storage solutions. We have provided a compatible table for storage solutions that we tested in section [Storage Compatibility](#storage-compatibility). @@ -30,7 +30,7 @@ You can deploy ob-operator in a Kubernetes cluster by executing the following co - Stable ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - Development @@ -46,7 +46,7 @@ Helm Chart parameterizes the namespace in which ob-operator is deployed, allowin ```shell helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update -helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.2.2 +helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.3.0 ``` #### Using terraform @@ -98,7 +98,7 @@ kubectl create secret generic root-password --from-literal=password='root_passwo You can deploy OceanBase in a Kubernetes cluster by executing the following command: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/quickstart/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/quickstart/obcluster.yaml ``` It generally takes around 2 minutes to bootstrap a cluster. Execute the following command to check the status of the cluster. 
Once the cluster status changes to "running," it indicates that the cluster has been successfully created and bootstrapped: diff --git a/api/v1alpha1/obcluster_types.go b/api/v1alpha1/obcluster_types.go index 7c844c56e..2f29d3a73 100644 --- a/api/v1alpha1/obcluster_types.go +++ b/api/v1alpha1/obcluster_types.go @@ -88,5 +88,7 @@ func init() { } func (c *OBCluster) SupportStaticIP() bool { - return c.Annotations[oceanbaseconst.AnnotationsSupportStaticIP] == "true" + return c.Annotations[oceanbaseconst.AnnotationsSupportStaticIP] == "true" || + c.Annotations[oceanbaseconst.AnnotationsMode] == oceanbaseconst.ModeService || + c.Annotations[oceanbaseconst.AnnotationsMode] == oceanbaseconst.ModeStandalone } diff --git a/api/v1alpha1/obcluster_webhook_test.go b/api/v1alpha1/obcluster_webhook_test.go index 97b80b436..58cef98f5 100644 --- a/api/v1alpha1/obcluster_webhook_test.go +++ b/api/v1alpha1/obcluster_webhook_test.go @@ -148,7 +148,7 @@ var _ = Describe("Test OBCluster Webhook", Label("webhook"), func() { It("Validate existence of secrets", func() { By("Create normal cluster") - cluster := newOBCluster("test", 1, 1) + cluster := newOBCluster("test3", 1, 1) cluster.Spec.UserSecrets.Monitor = "" cluster.Spec.UserSecrets.ProxyRO = "" cluster.Spec.UserSecrets.Operator = "" @@ -158,17 +158,19 @@ var _ = Describe("Test OBCluster Webhook", Label("webhook"), func() { cluster2.Spec.UserSecrets.Monitor = "secret-that-does-not-exist" cluster2.Spec.UserSecrets.ProxyRO = "" cluster2.Spec.UserSecrets.Operator = "" - Expect(k8sClient.Create(ctx, cluster)).ShouldNot(Succeed()) + Expect(k8sClient.Create(ctx, cluster2)).Should(Succeed()) + cluster3 := newOBCluster("test3", 1, 1) cluster2.Spec.UserSecrets.Monitor = wrongKeySecret - Expect(k8sClient.Create(ctx, cluster)).ShouldNot(Succeed()) + Expect(k8sClient.Create(ctx, cluster3)).ShouldNot(Succeed()) Expect(k8sClient.Delete(ctx, cluster)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, cluster2)).Should(Succeed()) }) It("Validate secrets creation and fetch them", func() { By("Create normal cluster") - cluster := newOBCluster("test", 1, 1) + cluster := newOBCluster("test-create-secrets", 1, 1) cluster.Spec.UserSecrets.Monitor = "" cluster.Spec.UserSecrets.ProxyRO = "" cluster.Spec.UserSecrets.Operator = "" @@ -178,6 +180,7 @@ var _ = Describe("Test OBCluster Webhook", Label("webhook"), func() { Expect(cluster.Spec.UserSecrets.Monitor).ShouldNot(BeEmpty()) Expect(cluster.Spec.UserSecrets.ProxyRO).ShouldNot(BeEmpty()) Expect(cluster.Spec.UserSecrets.Operator).ShouldNot(BeEmpty()) + Expect(k8sClient.Delete(ctx, cluster)).Should(Succeed()) }) It("Validate single pvc with multiple storage classes", func() { diff --git a/api/v1alpha1/obtenant_webhook.go b/api/v1alpha1/obtenant_webhook.go index 6412f60eb..6358e0f3a 100644 --- a/api/v1alpha1/obtenant_webhook.go +++ b/api/v1alpha1/obtenant_webhook.go @@ -18,6 +18,7 @@ package v1alpha1 import ( "context" + "errors" "fmt" "regexp" "strings" @@ -291,55 +292,13 @@ func (r *OBTenant) validateMutation() error { if res.ArchiveSource == nil && res.BakDataSource == nil && res.SourceUri == "" { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore"), res, "Restore must have a source option, but both archiveSource, bakDataSource and sourceUri are nil now")) - } - - if res.ArchiveSource != nil && res.ArchiveSource.Type == constants.BackupDestTypeOSS { - if res.ArchiveSource.OSSAccessSecret == "" { - allErrs = append(allErrs, 
field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("archiveSource").Child("ossAccessSecret"), res.ArchiveSource.OSSAccessSecret, "Tenant restoring from OSS type backup data must have a OSSAccessSecret")) - } else { - secret := &v1.Secret{} - err := tenantClt.Get(context.Background(), types.NamespacedName{ - Namespace: r.GetNamespace(), - Name: res.ArchiveSource.OSSAccessSecret, - }, secret) - if err != nil { - if apierrors.IsNotFound(err) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("archiveSource").Child("ossAccessSecret"), res.ArchiveSource.OSSAccessSecret, "Given OSSAccessSecret not found")) - } - allErrs = append(allErrs, field.InternalError(field.NewPath("spec").Child("source").Child("restore").Child("archiveSource").Child("ossAccessSecret"), err)) - } else { - if _, ok := secret.Data["accessId"]; !ok { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("archiveSource").Child("ossAccessSecret"), res.ArchiveSource.OSSAccessSecret, "accessId field not found in given OSSAccessSecret")) - } - if _, ok := secret.Data["accessKey"]; !ok { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("archiveSource").Child("ossAccessSecret"), res.ArchiveSource.OSSAccessSecret, "accessKey field not found in given OSSAccessSecret")) - } - } - } - } - - if res.BakDataSource != nil && res.BakDataSource.Type == constants.BackupDestTypeOSS { - if res.BakDataSource.OSSAccessSecret == "" { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("bakDataSource").Child("ossAccessSecret"), res.BakDataSource.OSSAccessSecret, "Tenant restoring from OSS type backup data must have a OSSAccessSecret")) - } else { - secret := &v1.Secret{} - err := tenantClt.Get(context.Background(), types.NamespacedName{ - Namespace: r.GetNamespace(), - Name: res.BakDataSource.OSSAccessSecret, - }, secret) - if err != nil { - if apierrors.IsNotFound(err) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("bakDataSource").Child("ossAccessSecret"), res.BakDataSource.OSSAccessSecret, "Given OSSAccessSecret not found")) - } - allErrs = append(allErrs, field.InternalError(field.NewPath("spec").Child("source").Child("restore").Child("bakDataSource").Child("ossAccessSecret"), err)) - } else { - if _, ok := secret.Data["accessId"]; !ok { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("bakDataSource").Child("ossAccessSecret"), res.BakDataSource.OSSAccessSecret, "accessId field not found in given OSSAccessSecret")) - } - if _, ok := secret.Data["accessKey"]; !ok { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("source").Child("restore").Child("bakDataSource").Child("ossAccessSecret"), res.BakDataSource.OSSAccessSecret, "accessKey field not found in given OSSAccessSecret")) - } - } + } else { + destErrs := errors.Join( + validateBackupDestination(cluster, res.ArchiveSource, "spec", "source", "restore", "archiveSource"), + validateBackupDestination(cluster, res.BakDataSource, "spec", "source", "restore", "bakDataSource"), + ) + if destErrs != nil { + return destErrs } } } @@ -355,3 +314,68 @@ func (r *OBTenant) ValidateDelete() (admission.Warnings, error) { // TODO(user): fill in your validation logic upon object deletion. 
return nil, nil } + +func validateBackupDestination(cluster *OBCluster, dest *apitypes.BackupDestination, paths ...string) error { + var errorPath *field.Path + if len(paths) == 0 { + errorPath = field.NewPath("spec").Child("destination") + } else { + errorPath = field.NewPath("spec").Child(paths[0]) + for _, p := range paths[1:] { + errorPath = errorPath.Child(p) + } + } + if dest.Type == constants.BackupDestTypeNFS && cluster.Spec.BackupVolume == nil { + return field.Invalid(errorPath, cluster.Spec.BackupVolume, "backupVolume of obcluster is required when backing up data to NFS") + } + pattern, ok := constants.DestPathPatternMapping[dest.Type] + if !ok { + return field.Invalid(errorPath.Child("destination").Child("type"), dest.Type, "invalid backup destination type") + } + if !pattern.MatchString(dest.Path) { + return field.Invalid(errorPath.Child("destination").Child("path"), dest.Path, "invalid backup destination path, the path format should be "+pattern.String()) + } + if dest.Type != constants.BackupDestTypeNFS { + if dest.OSSAccessSecret == "" { + return field.Invalid(errorPath.Child("destination"), dest.OSSAccessSecret, "OSSAccessSecret is required when backing up data to OSS, COS or S3") + } + secret := &v1.Secret{} + err := bakClt.Get(context.Background(), types.NamespacedName{ + Namespace: cluster.GetNamespace(), + Name: dest.OSSAccessSecret, + }, secret) + fieldPath := errorPath.Child("destination").Child("ossAccessSecret") + if err != nil { + if apierrors.IsNotFound(err) { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "Given OSSAccessSecret not found") + } + return field.InternalError(fieldPath, err) + } + // All the following types need accessId and accessKey + switch dest.Type { + case + constants.BackupDestTypeCOS, + constants.BackupDestTypeOSS, + constants.BackupDestTypeS3, + constants.BackupDestTypeS3Compatible: + if _, ok := secret.Data["accessId"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessId field not found in given OSSAccessSecret") + } + if _, ok := secret.Data["accessKey"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessKey field not found in given OSSAccessSecret") + } + } + // The following types need additional fields + switch dest.Type { + case constants.BackupDestTypeCOS: + if _, ok := secret.Data["appId"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "appId field not found in given OSSAccessSecret") + } + case constants.BackupDestTypeS3: + if _, ok := secret.Data["s3Region"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "s3Region field not found in given OSSAccessSecret") + } + } + } + return nil +} diff --git a/api/v1alpha1/obtenantbackuppolicy_webhook.go b/api/v1alpha1/obtenantbackuppolicy_webhook.go index 66ac4f818..4ae6a9c27 100644 --- a/api/v1alpha1/obtenantbackuppolicy_webhook.go +++ b/api/v1alpha1/obtenantbackuppolicy_webhook.go @@ -304,44 +304,46 @@ func (r *OBTenantBackupPolicy) validateDestination(cluster *OBCluster, dest *api if !pattern.MatchString(dest.Path) { return field.Invalid(field.NewPath("spec").Child(fieldName).Child("destination"), dest.Path, "invalid backup destination path, the path format should be "+pattern.String()) } - if dest.Type != constants.BackupDestTypeNFS && dest.OSSAccessSecret == "" { - return field.Invalid(field.NewPath("spec").Child(fieldName).Child("destination"), dest.OSSAccessSecret, "OSSAccessSecret is required when backing up data to OSS, COS or S3") - } - secret := &v1.Secret{} - err := bakClt.Get(context.Background(), 
types.NamespacedName{ - Namespace: r.GetNamespace(), - Name: dest.OSSAccessSecret, - }, secret) - fieldPath := field.NewPath("spec").Child(fieldName).Child("destination").Child("ossAccessSecret") - if err != nil { - if apierrors.IsNotFound(err) { - return field.Invalid(fieldPath, dest.OSSAccessSecret, "Given OSSAccessSecret not found") - } - return field.InternalError(fieldPath, err) - } - // All the following types need accessId and accessKey - switch dest.Type { - case - constants.BackupDestTypeCOS, - constants.BackupDestTypeOSS, - constants.BackupDestTypeS3, - constants.BackupDestTypeS3Compatible: - if _, ok := secret.Data["accessId"]; !ok { - return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessId field not found in given OSSAccessSecret") + if dest.Type != constants.BackupDestTypeNFS { + if dest.OSSAccessSecret == "" { + return field.Invalid(field.NewPath("spec").Child(fieldName).Child("destination"), dest.OSSAccessSecret, "OSSAccessSecret is required when backing up data to OSS, COS or S3") } - if _, ok := secret.Data["accessKey"]; !ok { - return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessKey field not found in given OSSAccessSecret") + secret := &v1.Secret{} + err := bakClt.Get(context.Background(), types.NamespacedName{ + Namespace: r.GetNamespace(), + Name: dest.OSSAccessSecret, + }, secret) + fieldPath := field.NewPath("spec").Child(fieldName).Child("destination").Child("ossAccessSecret") + if err != nil { + if apierrors.IsNotFound(err) { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "Given OSSAccessSecret not found") + } + return field.InternalError(fieldPath, err) } - } - // The following types need additional fields - switch dest.Type { - case constants.BackupDestTypeCOS: - if _, ok := secret.Data["appId"]; !ok { - return field.Invalid(fieldPath, dest.OSSAccessSecret, "appId field not found in given OSSAccessSecret") + // All the following types need accessId and accessKey + switch dest.Type { + case + constants.BackupDestTypeCOS, + constants.BackupDestTypeOSS, + constants.BackupDestTypeS3, + constants.BackupDestTypeS3Compatible: + if _, ok := secret.Data["accessId"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessId field not found in given OSSAccessSecret") + } + if _, ok := secret.Data["accessKey"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "accessKey field not found in given OSSAccessSecret") + } } - case constants.BackupDestTypeS3: - if _, ok := secret.Data["s3Region"]; !ok { - return field.Invalid(fieldPath, dest.OSSAccessSecret, "s3Region field not found in given OSSAccessSecret") + // The following types need additional fields + switch dest.Type { + case constants.BackupDestTypeCOS: + if _, ok := secret.Data["appId"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "appId field not found in given OSSAccessSecret") + } + case constants.BackupDestTypeS3: + if _, ok := secret.Data["s3Region"]; !ok { + return field.Invalid(fieldPath, dest.OSSAccessSecret, "s3Region field not found in given OSSAccessSecret") + } } } return nil diff --git a/api/v1alpha1/obtenantoperation_webhook.go b/api/v1alpha1/obtenantoperation_webhook.go index 994551f12..a7e90643c 100644 --- a/api/v1alpha1/obtenantoperation_webhook.go +++ b/api/v1alpha1/obtenantoperation_webhook.go @@ -226,15 +226,23 @@ func (r *OBTenantOperation) validateMutation() error { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("targetTenant"), r.Spec.TargetTenant, "The target tenant is not a standby")) } } + case 
constants.TenantOpSetUnitNumber, + constants.TenantOpSetConnectWhiteList, + constants.TenantOpAddResourcePools, + constants.TenantOpModifyResourcePools, + constants.TenantOpDeleteResourcePools: + return r.validateNewOperations() default: - if r.Spec.TargetTenant == nil { - allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("targetTenant"), "name of targetTenant is required")) - } - } - if len(allErrs) != 0 { - return allErrs.ToAggregate() + allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("type"), string(r.Spec.Type)+" type of operation is not supported")) } + return allErrs.ToAggregate() +} +func (r *OBTenantOperation) validateNewOperations() error { + if r.Spec.TargetTenant == nil { + return field.Required(field.NewPath("spec").Child("targetTenant"), "name of targetTenant is required") + } + allErrs := field.ErrorList{} obtenant := &OBTenant{} err := clt.Get(context.Background(), types.NamespacedName{Name: *r.Spec.TargetTenant, Namespace: r.Namespace}, obtenant) if err != nil { @@ -274,9 +282,11 @@ func (r *OBTenantOperation) validateMutation() error { } else { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("targetTenant"), r.Spec.TargetTenant, "The target tenant's cluster "+obtenant.Spec.ClusterName+" does not exist")) } + break } if obcluster.Spec.Topology == nil { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("targetTenant"), r.Spec.TargetTenant, "The target tenant's cluster "+obtenant.Spec.ClusterName+" does not have a topology")) + break } pools := make(map[string]any) for _, pool := range obtenant.Spec.Pools { @@ -287,6 +297,9 @@ func (r *OBTenantOperation) validateMutation() error { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("addResourcePools"), r.Spec.AddResourcePools, "The resource pool already exists")) } } + if len(allErrs) != 0 { + return allErrs.ToAggregate() + } zonesInOBCluster := make(map[string]any, len(obcluster.Spec.Topology)) for _, zone := range obcluster.Spec.Topology { zonesInOBCluster[zone.Zone] = struct{}{} @@ -313,6 +326,7 @@ func (r *OBTenantOperation) validateMutation() error { case constants.TenantOpDeleteResourcePools: if len(r.Spec.DeleteResourcePools) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("deleteResourcePools"), "deleteResourcePools is required")) + break } pools := make(map[string]any) for _, pool := range obtenant.Spec.Pools { @@ -324,7 +338,6 @@ func (r *OBTenantOperation) validateMutation() error { } } default: - allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("type"), string(r.Spec.Type)+" type of operation is not supported")) } return allErrs.ToAggregate() } diff --git a/api/v1alpha1/obtenantoperation_webhook_test.go b/api/v1alpha1/obtenantoperation_webhook_test.go index dc31104ac..58d7f2336 100644 --- a/api/v1alpha1/obtenantoperation_webhook_test.go +++ b/api/v1alpha1/obtenantoperation_webhook_test.go @@ -13,10 +13,11 @@ See the Mulan PSL v2 for more details. package v1alpha1 import ( - apiconsts "github.com/oceanbase/ob-operator/api/constants" - . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/resource" + + apiconsts "github.com/oceanbase/ob-operator/api/constants" ) var _ = Describe("Test OBTenantOperation Webhook", Label("webhook"), Serial, func() { @@ -25,7 +26,7 @@ var _ = Describe("Test OBTenantOperation Webhook", Label("webhook"), Serial, fun tenantStandby := "test-tenant-for-operation2" It("Create cluster and tenants", func() { - c := newOBCluster(clusterName, 1, 1) + c := newOBCluster(clusterName, 3, 1) t := newOBTenant(tenantPrimary, clusterName) t2 := newOBTenant(tenantStandby, clusterName) t2.Spec.TenantRole = apiconsts.TenantRoleStandby @@ -35,6 +36,10 @@ var _ = Describe("Test OBTenantOperation Webhook", Label("webhook"), Serial, fun Expect(k8sClient.Create(ctx, c)).Should(Succeed()) Expect(k8sClient.Create(ctx, t)).Should(Succeed()) Expect(k8sClient.Create(ctx, t2)).Should(Succeed()) + + t.Status.TenantRole = apiconsts.TenantRolePrimary + t.Status.Pools = []ResourcePoolStatus{} + Expect(k8sClient.Status().Update(ctx, t)).Should(Succeed()) }) It("Check operation types", func() { @@ -119,6 +124,9 @@ var _ = Describe("Test OBTenantOperation Webhook", Label("webhook"), Serial, fun notexist := "tenant-not-exist" op.Spec.TargetTenant = ¬exist Expect(k8sClient.Create(ctx, op)).ShouldNot(Succeed()) + + op.Spec.TargetTenant = &tenantPrimary + Expect(k8sClient.Create(ctx, op)).Should(Succeed()) }) It("Check operation replay log", func() { @@ -144,4 +152,90 @@ var _ = Describe("Test OBTenantOperation Webhook", Label("webhook"), Serial, fun } Expect(k8sClient.Create(ctx, op)).ShouldNot(Succeed()) }) + + It("Check adding resource pools", func() { + op := newTenantOperation(tenantPrimary) + op.Spec.Type = apiconsts.TenantOpAddResourcePools + op.Spec.AddResourcePools = []ResourcePoolSpec{{ + Zone: "zone1", + Type: &LocalityType{ + Name: "Full", + Replica: 1, + IsActive: true, + }, + UnitConfig: &UnitConfig{ + MaxCPU: resource.MustParse("1"), + MemorySize: resource.MustParse("5Gi"), + MinCPU: resource.MustParse("1"), + MaxIops: 1024, + MinIops: 1024, + IopsWeight: 2, + LogDiskSize: resource.MustParse("12Gi"), + }, + }} + + notexist := "tenant-not-exist" + op.Spec.TargetTenant = ¬exist + + Expect(k8sClient.Create(ctx, op)).ShouldNot(Succeed()) + + op.Spec.TargetTenant = &tenantPrimary + Expect(k8sClient.Create(ctx, op)).ShouldNot(Succeed()) + + op.Spec.Force = true + Expect(k8sClient.Create(ctx, op)).Should(Succeed()) + + // Delete resource pool + opDel := newTenantOperation(tenantPrimary) + opDel.Spec.Type = apiconsts.TenantOpDeleteResourcePools + opDel.Spec.DeleteResourcePools = []string{"zone0"} + opDel.Spec.TargetTenant = &tenantPrimary + Expect(k8sClient.Create(ctx, opDel)).ShouldNot(Succeed()) + opDel.Spec.Force = true + Expect(k8sClient.Create(ctx, opDel)).Should(Succeed()) + }) + + It("Check modifying resource pools", func() { + op := newTenantOperation(tenantPrimary) + op.Spec.Type = apiconsts.TenantOpModifyResourcePools + op.Spec.ModifyResourcePools = []ResourcePoolSpec{{ + Zone: "zone0", + Type: &LocalityType{ + Name: "Full", + Replica: 1, + IsActive: true, + }, + UnitConfig: &UnitConfig{ + MaxCPU: resource.MustParse("6"), + MemorySize: resource.MustParse("6Gi"), + MinCPU: resource.MustParse("2"), + MaxIops: 1024, + MinIops: 1024, + IopsWeight: 2, + LogDiskSize: resource.MustParse("12Gi"), + }, + }} + + op.Spec.TargetTenant = &tenantPrimary + Expect(k8sClient.Create(ctx, op)).ShouldNot(Succeed()) + + op.Spec.Force = true + Expect(k8sClient.Create(ctx, op)).Should(Succeed()) + }) + + It("Check setting connection 
white list", func() { + op := newTenantOperation(tenantPrimary) + op.Spec.Type = apiconsts.TenantOpSetConnectWhiteList + op.Spec.ConnectWhiteList = "%,127.0.0.1" + op.Spec.Force = true + Expect(k8sClient.Create(ctx, op)).Should(Succeed()) + }) + + It("Check setting unit number", func() { + op := newTenantOperation(tenantPrimary) + op.Spec.Type = apiconsts.TenantOpSetUnitNumber + op.Spec.UnitNumber = 2 + op.Spec.Force = true + Expect(k8sClient.Create(ctx, op)).Should(Succeed()) + }) }) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 9926ecb2a..5f103f02f 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -210,6 +210,8 @@ var _ = AfterSuite(func() { By("Clean auxiliary resources") cancel() By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) + if testEnv != nil { + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) + } }) diff --git a/build/Dockerfile.dashboard b/build/Dockerfile.dashboard index 164a2306c..af511e73e 100644 --- a/build/Dockerfile.dashboard +++ b/build/Dockerfile.dashboard @@ -1,11 +1,11 @@ -FROM node:18-alpine as builder-fe +FROM node:18-alpine AS builder-fe WORKDIR /workspace COPY ./ui . ENV NODE_OPTIONS=--max_old_space_size=5120 RUN yarn RUN yarn build -FROM golang:1.22 as builder-be +FROM golang:1.22 AS builder-be ARG GOPROXY=https://goproxy.io,direct ARG GOSUMDB=sum.golang.org ARG COMMIT_HASH=unknown diff --git a/build/Dockerfile.obhelper b/build/Dockerfile.obhelper index bbb4fb8ab..4eff551e5 100644 --- a/build/Dockerfile.obhelper +++ b/build/Dockerfile.obhelper @@ -1,4 +1,4 @@ -FROM golang:1.22 as builder +FROM golang:1.22 AS builder ARG GOPROXY=https://goproxy.io,direct WORKDIR /workspace COPY . . 
diff --git a/build/Dockerfile.operator b/build/Dockerfile.operator index 74a0c09ed..3442bf649 100644 --- a/build/Dockerfile.operator +++ b/build/Dockerfile.operator @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.22 as builder +FROM golang:1.22 AS builder ARG GOPROXY ARG GOSUMDB diff --git a/charts/ob-operator/Chart.yaml b/charts/ob-operator/Chart.yaml index 487ff0502..6797e9e8a 100644 --- a/charts/ob-operator/Chart.yaml +++ b/charts/ob-operator/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 2.2.2 +appVersion: 2.3.0 description: A Helm chart for OB-Operator name: ob-operator type: application -version: 2.2.2 +version: 2.3.0 diff --git a/charts/ob-operator/templates/operator.yaml b/charts/ob-operator/templates/operator.yaml index 05bfdf6fb..f1c0bb0c5 100644 --- a/charts/ob-operator/templates/operator.yaml +++ b/charts/ob-operator/templates/operator.yaml @@ -21377,7 +21377,7 @@ spec: - --log-verbosity=0 command: - /manager - image: quay.io/oceanbase/ob-operator:2.2.2 + image: quay.io/oceanbase/ob-operator:2.3.0 livenessProbe: httpGet: path: /healthz diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 496d2a7dc..8646b404a 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -5,4 +5,4 @@ kind: Kustomization images: - name: controller newName: quay.io/oceanbase/ob-operator - newTag: 2.2.2 + newTag: 2.3.0 diff --git a/deploy/operator.yaml b/deploy/operator.yaml index b08bed649..cce6e8a44 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -21390,7 +21390,7 @@ spec: - --log-verbosity=0 command: - /manager - image: quay.io/oceanbase/ob-operator:2.2.2 + image: quay.io/oceanbase/ob-operator:2.3.0 livenessProbe: httpGet: path: /healthz diff --git a/docsite/docs/developer/deploy-locally.md b/docsite/docs/developer/deploy-locally.md index 53e73f0dc..8ca4f89e6 100644 --- a/docsite/docs/developer/deploy-locally.md +++ b/docsite/docs/developer/deploy-locally.md @@ -40,14 +40,14 @@ Tips: Perform `minikube dashboard` to open kubernetes dashboard, everything in t ob-operator depends on `cert-manager` to enable TLS functionalities, so we should install it first. ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` ### 4. Install ob-operator For robustness, default memory limit of ob-operator container is set to `1Gi` which is too large for us developing locally. We recommend fetching the manifests to local and configure it. wget tool could be useful here, while opening the URL and copying the contents to local file is more straight. -https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml Search the pattern `/manager`, find the target container, configure the memory limit to `400Mi` and cpu limit to `400m`. diff --git a/docsite/docs/developer/deploy.md b/docsite/docs/developer/deploy.md index 3fc5545d1..75d175dbc 100644 --- a/docsite/docs/developer/deploy.md +++ b/docsite/docs/developer/deploy.md @@ -12,20 +12,20 @@ ob-operator supports deployment using Helm. 
Before deploying ob-operator with th ```shell helm repo add ob-operator https://oceanbase.github.io/ob-operator/ -helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.2.2 +helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.3.0 ``` Parameters: * namespace: Namespace, can be customized. It is recommended to use "oceanbase-system" as the namespace. -* version: ob-operator version number. It is recommended to use the latest version `2.2.2`. +* version: ob-operator version number. It is recommended to use the latest version `2.3.0`. ## 2.2 Deploying with Configuration Files * Stable ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` * Development ```shell diff --git a/docsite/docs/manual/200.quick-start-of-ob-operator.md b/docsite/docs/manual/200.quick-start-of-ob-operator.md index 4ba78fe29..ed2462e89 100644 --- a/docsite/docs/manual/200.quick-start-of-ob-operator.md +++ b/docsite/docs/manual/200.quick-start-of-ob-operator.md @@ -21,7 +21,7 @@ Run the following command to deploy ob-operator in the Kubernetes cluster: - Deploy the stable version of ob-operator ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - Deploy the developing version of ob-operator @@ -61,7 +61,7 @@ Perform the following steps to deploy an OceanBase cluster in the Kubernetes clu Run the following command to deploy an OceanBase cluster in the Kubernetes cluster: ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/quickstart/obcluster.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/quickstart/obcluster.yaml ``` In general, it takes about 2 minutes to create a cluster. 
Run the following command to check the cluster status: diff --git a/docsite/docs/manual/300.deploy-ob-operator.md b/docsite/docs/manual/300.deploy-ob-operator.md index a5b1f5a90..dad4f7ee5 100644 --- a/docsite/docs/manual/300.deploy-ob-operator.md +++ b/docsite/docs/manual/300.deploy-ob-operator.md @@ -33,7 +33,7 @@ You can deploy ob-operator by using the configuration file for the stable or dev * Deploy the stable version of ob-operator ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` * Deploy the developing version of ob-operator diff --git a/docsite/docs/manual/400.ob-operator-upgrade.md b/docsite/docs/manual/400.ob-operator-upgrade.md index 44e79d4ee..812cb58e8 100644 --- a/docsite/docs/manual/400.ob-operator-upgrade.md +++ b/docsite/docs/manual/400.ob-operator-upgrade.md @@ -17,7 +17,7 @@ If you upgrade ob-operator by using configuration files, you only need to reappl - Deploy the stable version of ob-operator ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - Deploy the developing version of ob-operator diff --git a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md index 37af9a02d..cd505b2dd 100644 --- a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md +++ b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md @@ -16,7 +16,7 @@ Before you create a tenant, make sure the following conditions are met: ## Create a tenant by using the configuration file -You can create a tenant by using the configuration file of the tenant. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.2_release/example/tenant/tenant.yaml). +You can create a tenant by using the configuration file of the tenant. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.3.0_release/example/tenant/tenant.yaml). Run the following command to create a tenant. This command creates an OceanBase Database tenant with custom resources in the current Kubernetes cluster. diff --git a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md index 2a548c86c..a6e449c8f 100644 --- a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md +++ b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md @@ -8,7 +8,7 @@ This topic describes how to use ob-operator to delete a tenant from a Kubernetes ## Procedure -You can delete the specified tenant resources from the cluster by using the configuration file `tenant.yaml`. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.2_release/example/tenant/tenant.yaml). 
+You can delete the specified tenant resources from the cluster by using the configuration file `tenant.yaml`. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.3.0_release/example/tenant/tenant.yaml).
 
 Run the following command to delete a tenant. This command deletes an OceanBase Database tenant with custom resources in the current Kubernetes cluster.
diff --git a/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md b/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md
index 4351cc0ae..a1f8a2276 100644
--- a/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md
+++ b/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md
@@ -9,3 +9,4 @@ ob-operator ensures the high availability of data by using the following feature
 * Node fault recovery. The distributed architecture of OceanBase Database allows you to restore the service when a minority of nodes fail. By relying on certain network plugins, you can even restore the service from majority nodes failure. For more information, see [Recover from node failure](300.disaster-recovery-of-ob-operator.md).
 * Backup and restore of tenant data. The backup and restore feature of OceanBase Database allows you to back up tenant data to different storage media to ensure data safety. For more information, see [Back up a tenant](400.tenant-backup-of-ob-operator.md).
 * Primary and standby tenants. OceanBase Database allows you to create a standby tenant for the primary tenant. When a fault occurs to the primary tenant, you can quickly switch your business to the standby tenant to reduce the business interruption. For more information, see [Physical standby database](600.standby-tenant-of-ob-operator.md).
+* Inter K8s cluster management. OceanBase Database can be deployed across multiple K8s clusters, which greatly improves high availability and gives users more confidence to operate the K8s clusters that run OceanBase workloads. For more information, see [Inter K8s cluster management](700.inter-k8s-cluster-management.md).
diff --git a/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/700.inter-k8s-cluster-management.md b/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/700.inter-k8s-cluster-management.md
new file mode 100644
index 000000000..e204108d0
--- /dev/null
+++ b/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/700.inter-k8s-cluster-management.md
@@ -0,0 +1,95 @@
+---
+sidebar_position: 6
+---
+
+# Inter K8s cluster management
+
+:::tip
+This feature is available in ob-operator version 2.3.0 and later.
+Prerequisite: Pod and service connectivity must be established across all K8s clusters involved.
+:::
+
+Deploying workloads across multiple K8s clusters enhances scalability, reliability, and security. By deploying zones (obzones) across different K8s clusters, you can fully leverage OceanBase's high-availability design. This approach provides disaster tolerance at the K8s cluster level, making operations more resilient.
+
+## Architecture
+![inter-k8s-cluster-architecture](/img/inter-k8s-cluster-architecture.jpg)
+
+As shown in the architecture diagram, K8s clusters play different roles. The cluster running the ob-operator is referred to as the master cluster, while the other clusters are called worker clusters.
+Worker clusters are registered by creating a custom resource of type K8sCluster in the master cluster. The ob-operator accesses these worker clusters using credentials stored in these resources. While OceanBase workloads run as native K8s resources in the worker clusters, the custom resources for OceanBase remain in the master cluster.
+
+## How to add `worker` K8s Cluster
+To add a worker cluster, ensure the credentials used for access have permissions to `get`, `list`, `watch`, `create`, `update`, `patch` and `delete` resources of type `pod`, `service`, `pvc`, `job` and `namespace`. Follow the example below to create a K8sCluster resource by replacing the placeholder under kubeConfig with your worker cluster’s credentials, then apply it to the master cluster.
+
+```yaml k8s_cluster.yaml
+apiVersion: k8s.oceanbase.com/v1alpha1
+kind: K8sCluster
+metadata:
+  name: k8s-remote
+spec:
+  name: remote
+  description: "This is the remote k8s cluster for testing"
+  kubeConfig: |
+    # Typically you can find it in ~/.kube/config
+```
+
+Verify the resource using the following command:
+```bash
+kubectl get k8scluster
+```
+
+The expected output should look like this:
+```bash
+NAME         AGE   CLUSTERNAME
+k8s-remote   1m    remote
+```
+
+## Create OceanBase Cluster across multiple K8s clusters
+To create an OceanBase cluster across multiple K8s clusters, the only difference compared with deploying it in a single K8s cluster is that you specify which K8s cluster each obzone should be created in. You may reference the following example.
+```yaml multi-k8s-cluster.yaml
+apiVersion: oceanbase.oceanbase.com/v1alpha1
+kind: OBCluster
+metadata:
+  name: test
+  namespace: default
+  # annotations:
+  #   "oceanbase.oceanbase.com/independent-pvc-lifecycle": "true"
+  #   "oceanbase.oceanbase.com/mode": "service"
+spec:
+  clusterName: test
+  clusterId: 1
+  userSecrets:
+    root: root-password
+  topology:
+    - zone: zone1
+      replica: 1
+    - zone: zone2
+      replica: 1
+      k8sCluster: k8s-cluster-hz
+    - zone: zone3
+      replica: 1
+      k8sCluster: k8s-cluster-sh
+  observer:
+    image: oceanbase/oceanbase-cloud-native:4.2.1.7-107000162024060611
+    resource:
+      cpu: 2
+      memory: 10Gi
+    storage:
+      dataStorage:
+        storageClass: local-path
+        size: 50Gi
+      redoLogStorage:
+        storageClass: local-path
+        size: 50Gi
+      logStorage:
+        storageClass: local-path
+        size: 20Gi
+  parameters:
+    - name: system_memory
+      value: 1G
+    - name: "__min_full_resource_pool_memory"
+      value: "2147483648" # 2G
+```
+
+## Managing OceanBase Cluster in multiple K8s clusters
+Managing an OceanBase cluster across multiple K8s clusters remains straightforward. Simply modify the custom resources in the master cluster, and the ob-operator will synchronize the changes with the relevant resources in the worker clusters.
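
For reference, a minimal RBAC sketch that grants the verbs and resource types listed in the worker-cluster section above; the role, binding, service-account, and namespace names are placeholders, and `pvc`/`job` are assumed to map to the `persistentvolumeclaims` and `batch/jobs` API resources:

```yaml
# Illustrative only: names are placeholders; verbs and resources mirror the list in the section above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ob-operator-worker-access
rules:
  - apiGroups: [""]
    resources: ["pods", "services", "persistentvolumeclaims", "namespaces"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ob-operator-worker-access
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ob-operator-worker-access
subjects:
  - kind: ServiceAccount
    name: ob-operator-remote   # hypothetical account whose token backs the kubeConfig
    namespace: kube-system
```

Cluster-scoped rules are used here because `namespace` itself appears in the required resources; if the credentials are scoped more narrowly, every namespace that will host OceanBase workloads on the worker cluster needs equivalent access.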
+ diff --git a/docsite/docs/manual/900.appendix/100.example.md b/docsite/docs/manual/900.appendix/100.example.md index 251953ebf..a2e1df7bb 100644 --- a/docsite/docs/manual/900.appendix/100.example.md +++ b/docsite/docs/manual/900.appendix/100.example.md @@ -27,7 +27,7 @@ In this example, the following components are deployed: Create a namespace: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/namespace.yaml ``` View the created namespace: @@ -46,7 +46,7 @@ oceanbase Active 98s Create secrets for the cluster and tenants: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/secret.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/secret.yaml ``` View the created secrets: @@ -73,7 +73,7 @@ ob-configserver allows you to register, store, and query metadata of the RootSer Run the following command to deploy ob-configserver and create the corresponding service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/configserver.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/configserver.yaml ``` Check the pod status: @@ -101,7 +101,7 @@ When you deploy an OceanBase cluster, add environment variables and set the syst Deploy the OceanBase cluster: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/obcluster.yaml ``` Run the following command to query the status of the OceanBase cluster until the status becomes `running`: @@ -121,7 +121,7 @@ You can start ODP by using ob-configserver or specifying the RS list. To maximiz Run the following command to deploy ODP and create the ODP service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/obproxy.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/obproxy.yaml ``` When you query the pod status of ODP, you can see two ODP pods. 
@@ -165,7 +165,7 @@ You can create a dedicated tenant for each type of business for better resource Run the following command to create a tenant: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/tenant.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/tenant.yaml ``` Run the following command to query the status of the tenant until the status becomes `running`: @@ -201,7 +201,7 @@ create database dev; Run the following command to deploy the application: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/oceanbase-todo.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/oceanbase-todo.yaml ``` After the deployment process is completed, run the following command to view the application status: @@ -254,7 +254,7 @@ When you deploy the OceanBase cluster, an OBAgent sidecar container is created i Run the following command to deploy Prometheus: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/prometheus.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/prometheus.yaml ``` Run the following command to view the deployment status: @@ -276,7 +276,7 @@ Grafana displays the metrics of OceanBase Database by using Prometheus as a data Run the following command to deploy Grafana: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/grafana.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/grafana.yaml ``` Run the following command to view the deployment status: @@ -302,4 +302,4 @@ This topic describes how to deploy OceanBase Database and related components suc ## Note -You can find all configuration files used in this topic in the [webapp](https://github.com/oceanbase/ob-operator/tree/2.2.2_release/example/webapp) directory. +You can find all configuration files used in this topic in the [webapp](https://github.com/oceanbase/ob-operator/tree/2.3.0_release/example/webapp) directory. diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md index 53e73f0dc..8ca4f89e6 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md @@ -40,14 +40,14 @@ Tips: Perform `minikube dashboard` to open kubernetes dashboard, everything in t ob-operator depends on `cert-manager` to enable TLS functionalities, so we should install it first. ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` ### 4. Install ob-operator For robustness, default memory limit of ob-operator container is set to `1Gi` which is too large for us developing locally. We recommend fetching the manifests to local and configure it. wget tool could be useful here, while opening the URL and copying the contents to local file is more straight. 
-https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml Search the pattern `/manager`, find the target container, configure the memory limit to `400Mi` and cpu limit to `400m`. diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy.md index e1a9e9215..908a16fdf 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy.md @@ -10,20 +10,20 @@ ob-operator 支持通过 Helm 进行部署,在使用 Helm 命令部署 ob-oper ```shell helm repo add ob-operator https://oceanbase.github.io/ob-operator/ -helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.2.2 +helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.3.0 ``` 参数说明: * namespace:命名空间,可自定义,一般建议使用 oceanbase-system。 -* version:ob-operator 版本号,建议使用最新的版本 `2.2.2`。 +* version:ob-operator 版本号,建议使用最新的版本 `2.3.0`。 ## 2.2 使用配置文件部署 * Stable ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` * Development ```shell diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md index 2610061ed..f9c2cc787 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md @@ -21,7 +21,7 @@ sidebar_position: 2 - 稳定版本 ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - 开发版本 @@ -61,7 +61,7 @@ oceanbase-controller-manager-86cfc8f7bf-4hfnj 2/2 Running 0 1m 使用以下命令在 Kubernetes 集群上部署 OceanBase 集群: ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/quickstart/obcluster.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/quickstart/obcluster.yaml ``` 集群创建通常需要约 2 分钟。执行以下命令检查集群状态: diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md index cddb53e61..335848484 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md @@ -32,7 +32,7 @@ helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system -- * 稳定版本 ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` * 开发版本 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md 
b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md index 33fc19f55..149f41743 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md @@ -17,7 +17,7 @@ sidebar_position: 4 - 稳定版本 ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - 开发版本 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md index 3824da662..391c89a8f 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md @@ -16,7 +16,7 @@ sidebar_position: 2 ## 使用配置文件创建租户 -通过应用租户配置文件创建租户。配置文件内容可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.2_release/example/tenant/tenant.yaml) 。 +通过应用租户配置文件创建租户。配置文件内容可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.3.0_release/example/tenant/tenant.yaml) 。 创建租户的命令如下,该命令会在当前 Kubernetes 集群中创建一个 OBTenant 租户的资源。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md index 1f760516d..a5dc07fc6 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md @@ -8,7 +8,7 @@ sidebar_position: 4 ## 具体操作 -通过配置文件 tenant.yaml 在集群中删除指定的租户资源。配置文件可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.2_release/example/tenant/tenant.yaml)。 +通过配置文件 tenant.yaml 在集群中删除指定的租户资源。配置文件可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.3.0_release/example/tenant/tenant.yaml)。 删除租户的命令如下,该命令会在当前 Kubernetes 集群中删除对应租户的 OBTenant 资源。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md index 6471ae7e8..d9fa4807d 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md @@ -9,3 +9,4 @@ ob-operator 利用 OceanBase 的若干特性来保证数据的高可用 * [节点故障恢复](300.disaster-recovery-of-ob-operator.md),基于 OceanBase 
分布式的特性,可以从少数派节点故障的情况恢复,利用特定的网络插件甚至能实现全部节点故障的恢复。 * [租户数据备份恢复](400.tenant-backup-of-ob-operator.md),利用 OceanBase 的备份恢复能力,可以将租户的数据备份到其他存储介质,为数据提供更安全的保障。 * [主备租户](600.standby-tenant-of-ob-operator.md),利用 OceanBase 的主备租户能力,可以建立两个租户的主备关系,在故障发生时可以很快切换,能保证业务受到的影响更小。 +* [多 K8s 集群部署](700.inter-k8s-cluster-management.md),支持将一个 OceanBase 集群部署在多个 K8s 集群中, 可以显著的提高 OceanBase 的高可用能力,也给了用户更多信心来运维运行 OceanBase 负载的 K8s 集群。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/700.inter-k8s-cluster-management.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/700.inter-k8s-cluster-management.md new file mode 100644 index 000000000..d91942269 --- /dev/null +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/300.high-availability/700.inter-k8s-cluster-management.md @@ -0,0 +1,95 @@ +--- +sidebar_position: 6 +--- + +# 多 K8s 集群部署 + +:::tip +此功能适用于 ob-operator 2.3.0 及更高版本。 +前提条件: 需要在所有 K8s 集群之间保证 Pod 和服务的互通。 +::: + +在多个 K8s 集群上部署工作负载可以增强系统的扩展性、可靠性和安全性。通过将不同的 obzone 部署到不同的 K8s 集群,可以充分发挥 OceanBase 高可用架构的优势,实现集群级别的容灾,并使集群和工作负载的管理更加简单高效。 + +## 整体架构 +![inter-k8s-cluster-architecture](/img/inter-k8s-cluster-architecture.jpg) + +如架构图所示,K8s 集群具有不同角色。我们将部署了 ob-operator 的集群称为主集群(master),其他集群称为工作集群(worker)。 + +通过在主集群中创建类型为 K8sCluster 的自定义资源,可以将工作集群注册进来。ob-operator 使用存储在自定义资源中的凭证访问这些工作集群。OceanBase 的工作负载在工作集群中以原生 K8s 资源运行,而 OceanBase 的自定义资源仍保存在主集群中。 + +## 添加工作 K8s 集群 +要将 K8s 集群添加为工作集群,请确保凭证具有以下权限:`get`, `list`, `watch`, `create`, `update`, `patch` 和 `delete` 以下资源,`pod`, `service`, `pvc`, `job` 和 `namespace`。参考以下示例,将 kubeConfig 下的占位符替换为工作集群的凭证,并将其应用于主集群。 +```yaml k8s_cluster.yaml +apiVersion: k8s.oceanbase.com/v1alpha1 +kind: K8sCluster +metadata: + name: k8s-remote +spec: + name: remote + description: "This is the remote k8s cluster for testing" + kubeConfig: | + # Typically you can found it in ~/.kube/config +``` + +使用以下命令检查资源 +```bash +kubectl get k8scluster +``` + +预期输出如下 +```bash +NAME AGE CLUSTERNAME +k8s-remote 1m remote +``` + +## 创建多个 K8s 集群中运行的 OceanBase 集群 + +要在多个 K8s 集群中创建 OceanBase 集群,与单 K8s 集群中创建集群唯一的区别是为运行在工作集群中的 obzone 指定运行的 K8s 集群。请参考以下示例配置 + +```yaml multi-k8s-cluster.yaml +apiVersion: oceanbase.oceanbase.com/v1alpha1 +kind: OBCluster +metadata: + name: test + namespace: default + # annotations: + # "oceanbase.oceanbase.com/independent-pvc-lifecycle": "true" + # "oceanbase.oceanbase.com/mode": "service" +spec: + clusterName: test + clusterId: 1 + userSecrets: + root: root-password + topology: + - zone: zone1 + replica: 1 + - zone: zone2 + replica: 1 + k8sCluster: k8s-cluster-hz + - zone: zone3 + replica: 1 + k8sCluster: k8s-cluster-sh + observer: + image: oceanbase/oceanbase-cloud-native:4.2.1.7-107000162024060611 + resource: + cpu: 2 + memory: 10Gi + storage: + dataStorage: + storageClass: local-path + size: 50Gi + redoLogStorage: + storageClass: local-path + size: 50Gi + logStorage: + storageClass: local-path + size: 20Gi + parameters: + - name: system_memory + value: 1G + - name: "__min_full_resource_pool_memory" + value: "2147483648" # 2G +``` +## 管理多 K8s 集群中运行的 OceanBase 集群 +OceanBase 的管理方式和单个 K8s 集群中运行没有其他区别,只需要修改主集群中的自定义资源,ob-operator 会负责将相应更改同步到各个工作集群中。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md index 48a3cd9ec..2c7ec56f9 100644 --- 
diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md index 48a3cd9ec..2c7ec56f9 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md @@ -26,7 +26,7 @@ sidebar_position: 1 创建 namespace。 ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/namespace.yaml ``` 使用以下命令查看创建的 namespace: @@ -45,7 +45,7 @@ oceanbase Active 98s 创建集群和租户的 secret: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/secret.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/secret.yaml ``` 通过以下命令查看创建的 secret: @@ -72,7 +72,7 @@ ob-configserver 是提供 OceanBase rootservice 信息注册和查询的服务 使用如下命令部署 ob-configserver 以及创建对应的 service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/configserver.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/configserver.yaml ``` 检查 pod 状态: @@ -100,7 +100,7 @@ svc-ob-configserver NodePort 10.96.3.39 8080:30080/TCP 98s 使用如下命令部署 OceanBase 集群: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/obcluster.yaml ``` 轮询使用如下命令检查 obcluster 状态,直到集群变成 running 状态。 @@ -120,7 +120,7 @@ ObProxy 支持使用 ob-configserver 或者直接指定 rs_list 的形式启动 使用如下命令部署 ObProxy 以及创建 service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/obproxy.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/obproxy.yaml ``` 查看 ObProxy 的 pod 状态,会有两个 obproxy 的 pod。 @@ -164,7 +164,7 @@ mysql -h${obproxy-service-address} -P2883 -uroot@sys#metadb -p 使用如下命令创建租户: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/tenant.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/tenant.yaml ``` 创建后轮询租户的资源状态, 当变成 running 时表示租户以及创建完成了 @@ -200,7 +200,7 @@ create database dev; 使用如下命令部署应用: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/oceanbase-todo.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/oceanbase-todo.yaml ``` 部署成功之后,可以通过如下命令进行查看部署的状态: @@ -253,7 +253,7 @@ $ curl http://10.43.39.231:20031 使用如下命令部署 prometheus: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/prometheus.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/prometheus.yaml ``` 使用如下命令查看部署状态: @@ -275,7 +275,7 @@ grafana 可以使用 prometheus 作为数据源,进行 OceanBase 指标的展 使用如下命令部署 grafana: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/webapp/grafana.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/webapp/grafana.yaml ``` 使用如下命令查看部署状态: @@ -301,4 +301,4 @@ svc-grafana NodePort 10.96.2.145 3000:30030/TCP 2m ## 说明 -本文中的配置文件均可在 [webapp 
配置文件](https://github.com/oceanbase/ob-operator/tree/2.2.2_release/example/webapp) 目录中找到。 +本文中的配置文件均可在 [webapp 配置文件](https://github.com/oceanbase/ob-operator/tree/2.3.0_release/example/webapp) 目录中找到。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/changelog.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/changelog.md index 6a48d2297..1a28cc91a 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/changelog.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/changelog.md @@ -1,5 +1,23 @@ # 变更日志 +## 2.3.0 (发布于 2024.10.14) + +### 新增特性 + +1. 支持跨 K8s 集群调度 OceanBase 集群 +2. 支持设置腾讯云 COS、AWS s3 以及 s3 兼容的对象存储服务作为数据备份的介质 +3. 支持删除特定的 OBServer +4. 支持根据场景设置优化 OceanBase 集群的系统参数和变量 +5. 支持将大部分 K8s 内置的 Pod 字段设置到 OBServer 中 + +### 缺陷修复 + +1. 修复 2-2-2 集群滚动替换 OBServer 时可能出现卡住的问题 + +### 功能优化 + +1. 补充了几种新的 `OBTenantOperation` 类型用于执行常见操作,如创建或删除资源池、设置 Unit Number 等 + ## 2.2.2 (发布于 2024.06.18) ### 新增特性 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx b/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx index 797dae269..23e1d7a88 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx @@ -19,7 +19,7 @@ import Link from '@docusaurus/Link' ob-operator 依赖 [cert-manager](https://cert-manager.io/docs/), cert-manager 的安装可以参考对应的[安装文档](https://cert-manager.io/docs/installation/),如果您无法访问官方制品托管在 `quay.io` 镜像站的镜像,可通过下面的指令安装我们转托在 `docker.io` 中的制品: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` 本例子中的 OceanBase 集群存储依赖 [local-path-provisioner](https://github.com/rancher/local-path-provisioner) 提供, 需要提前进行安装并确保其存储目的地有足够大的磁盘空间。如果您计划在生产环境部署,推荐使用其他的存储解决方案。我们在[存储兼容性](#存储兼容性)一节提供了我们测试过的存储兼容性结果。 @@ -33,7 +33,7 @@ kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_r - 稳定版本 ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - 开发版本 @@ -49,7 +49,7 @@ Helm Chart 将 ob-operator 部署的命名空间进行了参数化,可在安 ```shell helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update -helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.2.2 +helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.3.0 ``` #### 使用 terraform @@ -101,7 +101,7 @@ kubectl create secret generic root-password --from-literal=password='root_passwo 通过以下命令即可在 K8s 集群中部署 OceanBase: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/quickstart/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/quickstart/obcluster.yaml ``` 一般初始化集群需要 2 分钟左右的时间,执行以下命令,查询集群状态,当集群状态变成 running 之后表示集群创建和初始化成功: diff --git a/docsite/src/pages/changelog.md b/docsite/src/pages/changelog.md index dcb979b60..7934762db 100644 --- a/docsite/src/pages/changelog.md +++ b/docsite/src/pages/changelog.md @@ -1,5 +1,23 @@ # Changelog +## 2.3.0 (Release on 2024.10.14) + +### New Features + +1. Support for scheduling OceanBase cluster across multiple K8s clusters. +2. 
Support for backing up to Tencent COS, AWS S3, and S3-compatible object storage. +3. Support for deleting specific OBServers. +4. Support for optimizing OceanBase cluster parameters and variables by scenario. +5. Support for setting most native Pod fields on OBServers. + +### Bug fixes + +1. Fixed an issue where a 2-2-2 cluster could get stuck while rolling-replacing its OBServer pods. + +### Optimization + +1. Added several new `OBTenantOperation` types for common operations such as creating or deleting resource pools, setting the unit number, and so on. + ## 2.2.2 (Release on 2024.06.18) ### New Features diff --git a/docsite/src/pages/index.mdx index 780f3df93..4c92b9933 100644 --- a/docsite/src/pages/index.mdx +++ b/docsite/src/pages/index.mdx @@ -20,7 +20,7 @@ ob-operator relies on [cert-manager](https://cert-manager.io/docs/) for certific If you have trouble accessing `quay.io` image registry, our mirrored cert-manager manifests can be applied through following command: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` Storage of OceanBase cluster in this example relies on [local-path-provisioner](https://github.com/rancher/local-path-provisioner), which should be installed beforehand. You should confirm that there is enough disk space in storage destination of local-path-provisioner.If you decide to deploy OceanBase cluster in production environment, it is recommended to use other storage solutions. We have provided a compatible table for storage solutions that we tested in section [Storage Compatibility](#storage-compatibility). @@ -34,7 +34,7 @@ You can deploy ob-operator in a Kubernetes cluster by executing the following co - Stable ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/operator.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` - Development @@ -50,7 +50,7 @@ Helm Chart parameterizes the namespace in which ob-operator is deployed, allowin ```shell helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update -helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.2.2 +helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system --create-namespace --version=2.3.0 ``` #### Using terraform @@ -102,7 +102,7 @@ kubectl create secret generic root-password --from-literal=password='root_passwo You can deploy OceanBase in a Kubernetes cluster by executing the following command: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/example/quickstart/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/example/quickstart/obcluster.yaml ``` It generally takes around 2 minutes to bootstrap a cluster. Execute the following command to check the status of the cluster. Once the cluster status changes to "running," it indicates that the cluster has been successfully created and bootstrapped:
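The status-check command itself lies just outside the hunk above, so it does not appear in this diff. As a rough, hedged illustration only (not necessarily the exact command used in the docs), polling the OBCluster resources until they report running can look like this; the fully qualified resource name is derived from the `oceanbase.oceanbase.com/v1alpha1` API group used elsewhere in this patch:

```shell
# Watch OBCluster resources in all namespaces until their status becomes running.
# The fully qualified plural avoids relying on short-name resolution.
kubectl get obclusters.oceanbase.oceanbase.com -A -w
```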
diff --git a/docsite/static/img/inter-k8s-cluster-architecture.jpg b/docsite/static/img/inter-k8s-cluster-architecture.jpg new file mode 100644 index 000000000..e7abc31b8 Binary files /dev/null and b/docsite/static/img/inter-k8s-cluster-architecture.jpg differ diff --git a/example/openstack/README.md b/example/openstack/README.md index 31581d060..644e98b5e 100644 --- a/example/openstack/README.md +++ b/example/openstack/README.md @@ -11,13 +11,13 @@ This folder contains configuration files to deploy OceanBase and OpenStack on Ku 1. Deploy cert-manager Deploy the cert-manager using the following command. Ensure all pods are running before proceeding to the next step: ``` -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/cert-manager.yaml ``` 2. deploy ob-operator Deploy the ob-operator using the command below. Wait until all pods are running: ``` -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.2_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.3.0_release/deploy/operator.yaml ``` 3. create secret Create secret using the following command diff --git a/internal/resource/observer/observer_task.go b/internal/resource/observer/observer_task.go index e44e55d75..189cd004d 100644 --- a/internal/resource/observer/observer_task.go +++ b/internal/resource/observer/observer_task.go @@ -114,8 +114,10 @@ func CreateOBServerPod(m *OBServerManager) tasktypes.TaskError { annotations := m.generateStaticIpAnnotation() ownerReferenceList = append(ownerReferenceList, ownerReference) observerPodSpec := m.createOBPodSpec(obcluster) - originLabels := m.OBServer.Labels - originLabels[oceanbaseconst.LabelOBServerUID] = string(m.OBServer.UID) + podLabels := m.OBServer.Labels + podLabels[oceanbaseconst.LabelRefUID] = string(m.OBServer.UID) + podLabels[oceanbaseconst.LabelOBServerUID] = string(m.OBServer.UID) // For compatibility with old version + podLabels[oceanbaseconst.LabelRefOBServer] = string(m.OBServer.Name) podFields := m.OBServer.Spec.OBServerTemplate.PodFields if podFields != nil { @@ -127,8 +129,8 @@ func CreateOBServerPod(m *OBServerManager) tasktypes.TaskError { observerPodSpec.Subdomain = varsReplacer.Replace(*podFields.Subdomain) } for k := range podFields.Labels { - if _, exist := originLabels[k]; !exist { - originLabels[k] = varsReplacer.Replace(podFields.Labels[k]) + if _, exist := podLabels[k]; !exist { + podLabels[k] = varsReplacer.Replace(podFields.Labels[k]) } } for k := range podFields.Annotations { @@ -144,7 +146,7 @@ func CreateOBServerPod(m *OBServerManager) tasktypes.TaskError { Name: m.OBServer.Name, Namespace: m.OBServer.Namespace, OwnerReferences: ownerReferenceList, - Labels: originLabels, + Labels: podLabels, Annotations: annotations, }, Spec: observerPodSpec, @@ -171,6 +173,10 @@ func CreateOBServerPVC(m *OBServerManager) tasktypes.TaskError { ownerReferenceList = append(ownerReferenceList, ownerReference) } singlePvcAnnoVal, singlePvcExist := resourceutils.GetAnnotationField(m.OBServer, oceanbaseconst.AnnotationsSinglePVC) + pvcLabels := m.OBServer.Labels + pvcLabels[oceanbaseconst.LabelRefUID] = string(m.OBServer.UID) + pvcLabels[oceanbaseconst.LabelRefOBServer] = string(m.OBServer.Name) + if singlePvcExist &&
singlePvcAnnoVal == "true" { sumQuantity := resource.Quantity{} sumQuantity.Add(m.OBServer.Spec.OBServerTemplate.Storage.DataStorage.Size) @@ -185,7 +191,7 @@ func CreateOBServerPVC(m *OBServerManager) tasktypes.TaskError { Name: m.OBServer.Name, Namespace: m.OBServer.Namespace, OwnerReferences: ownerReferenceList, - Labels: m.OBServer.Labels, + Labels: pvcLabels, }, Spec: m.generatePVCSpec(storageSpec), } @@ -198,7 +204,7 @@ func CreateOBServerPVC(m *OBServerManager) tasktypes.TaskError { Name: fmt.Sprintf("%s-%s", m.OBServer.Name, oceanbaseconst.DataVolumeSuffix), Namespace: m.OBServer.Namespace, OwnerReferences: ownerReferenceList, - Labels: m.OBServer.Labels, + Labels: pvcLabels, } pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: objectMeta, @@ -213,7 +219,7 @@ func CreateOBServerPVC(m *OBServerManager) tasktypes.TaskError { Name: fmt.Sprintf("%s-%s", m.OBServer.Name, oceanbaseconst.ClogVolumeSuffix), Namespace: m.OBServer.Namespace, OwnerReferences: ownerReferenceList, - Labels: m.OBServer.Labels, + Labels: pvcLabels, } pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: objectMeta, @@ -228,7 +234,7 @@ func CreateOBServerPVC(m *OBServerManager) tasktypes.TaskError { Name: fmt.Sprintf("%s-%s", m.OBServer.Name, oceanbaseconst.LogVolumeSuffix), Namespace: m.OBServer.Namespace, OwnerReferences: ownerReferenceList, - Labels: m.OBServer.Labels, + Labels: pvcLabels, } pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: objectMeta, @@ -516,11 +522,14 @@ func CreateOBServerSvc(m *OBServerManager) tasktypes.TaskError { mode, modeAnnoExist := resourceutils.GetAnnotationField(m.OBServer, oceanbaseconst.AnnotationsMode) if modeAnnoExist && mode == oceanbaseconst.ModeService { m.Logger.Info("Create observer service") + svcLabels := m.OBServer.Labels + svcLabels[oceanbaseconst.LabelRefUID] = string(m.OBServer.UID) + svcLabels[oceanbaseconst.LabelRefOBServer] = string(m.OBServer.Name) svc := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: m.OBServer.Name, Namespace: m.OBServer.Namespace, - Labels: m.OBServer.Labels, + Labels: svcLabels, OwnerReferences: []metav1.OwnerReference{{ APIVersion: m.OBServer.APIVersion, Kind: m.OBServer.Kind, diff --git a/internal/resource/observer/utils.go b/internal/resource/observer/utils.go index 4347d04d6..2d9d824eb 100644 --- a/internal/resource/observer/utils.go +++ b/internal/resource/observer/utils.go @@ -141,7 +141,7 @@ func (m *OBServerManager) setRecoveryStatus() { func (m *OBServerManager) getPVCs() (*corev1.PersistentVolumeClaimList, error) { pvcs := &corev1.PersistentVolumeClaimList{} - err := m.K8sResClient.List(m.Ctx, pvcs, client.InNamespace(m.OBServer.Namespace), client.MatchingLabels{oceanbaseconst.LabelRefUID: m.OBServer.Labels[oceanbaseconst.LabelRefUID]}) + err := m.K8sResClient.List(m.Ctx, pvcs, client.InNamespace(m.OBServer.Namespace), client.MatchingLabels{oceanbaseconst.LabelRefUID: string(m.OBServer.UID)}) if err != nil { return nil, errors.Wrap(err, "list pvc") } @@ -667,7 +667,7 @@ func (m *OBServerManager) cleanWorkerK8sResource() error { pvc := &corev1.PersistentVolumeClaim{} if err := m.K8sResClient.DeleteAllOf(m.Ctx, pvc, client.InNamespace(m.OBServer.Namespace), - client.MatchingLabels{oceanbaseconst.LabelRefUID: m.OBServer.Labels[oceanbaseconst.LabelRefUID]}, + client.MatchingLabels{oceanbaseconst.LabelRefUID: string(m.OBServer.UID)}, ); err != nil { errs = stderrs.Join(errs, errors.Wrap(err, "Failed to delete pvc")) } diff --git a/internal/resource/obtenantrestore/utils.go b/internal/resource/obtenantrestore/utils.go index 
55f7dbaee..ac2f3abfb 100644 --- a/internal/resource/obtenantrestore/utils.go +++ b/internal/resource/obtenantrestore/utils.go @@ -21,6 +21,7 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/oceanbase/ob-operator/api/constants" + apitypes "github.com/oceanbase/ob-operator/api/types" v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils" @@ -49,25 +50,8 @@ func (m *ObTenantRestoreManager) getSourceUri() (string, error) { return source.SourceUri, nil } var bakPath, archivePath string - if source.BakDataSource != nil && source.BakDataSource.Type == constants.BackupDestTypeOSS { - accessId, accessKey, err := m.readAccessCredentials(source.BakDataSource.OSSAccessSecret) - if err != nil { - return "", err - } - bakPath = strings.Join([]string{source.BakDataSource.Path, "access_id=" + accessId, "access_key=" + accessKey}, "&") - } else { - bakPath = "file://" + path.Join(oceanbaseconst.BackupPath, source.BakDataSource.Path) - } - - if source.ArchiveSource != nil && source.ArchiveSource.Type == constants.BackupDestTypeOSS { - accessId, accessKey, err := m.readAccessCredentials(source.ArchiveSource.OSSAccessSecret) - if err != nil { - return "", err - } - archivePath = strings.Join([]string{source.ArchiveSource.Path, "access_id=" + accessId, "access_key=" + accessKey}, "&") - } else { - archivePath = "file://" + path.Join(oceanbaseconst.BackupPath, source.ArchiveSource.Path) - } + bakPath = m.getDestPath(source.BakDataSource) + archivePath = m.getDestPath(source.ArchiveSource) if bakPath == "" || archivePath == "" { return "", errors.New("Unexpected error: both bakPath and archivePath must be set") @@ -76,16 +60,27 @@ func (m *ObTenantRestoreManager) getSourceUri() (string, error) { return strings.Join([]string{bakPath, archivePath}, ","), nil } -func (m *ObTenantRestoreManager) readAccessCredentials(secretName string) (accessId, accessKey string, err error) { +func (m *ObTenantRestoreManager) getDestPath(dest *apitypes.BackupDestination) string { + if dest.Type == constants.BackupDestTypeNFS || resourceutils.IsZero(dest.Type) { + return "file://" + path.Join(oceanbaseconst.BackupPath, dest.Path) + } + if dest.OSSAccessSecret == "" { + return "" + } secret := &v1.Secret{} - err = m.Client.Get(m.Ctx, types.NamespacedName{ - Namespace: m.Resource.Namespace, - Name: secretName, + err := m.Client.Get(m.Ctx, types.NamespacedName{ + Namespace: m.Resource.GetNamespace(), + Name: dest.OSSAccessSecret, }, secret) if err != nil { - return "", "", err + m.PrintErrEvent(err) + return "" + } + destPath := strings.Join([]string{dest.Path, "access_id=" + string(secret.Data["accessId"]), "access_key=" + string(secret.Data["accessKey"])}, "&") + if dest.Type == constants.BackupDestTypeCOS { + destPath += ("&appid=" + string(secret.Data["appId"])) + } else if dest.Type == constants.BackupDestTypeS3 { + destPath += ("&s3_region=" + string(secret.Data["s3Region"])) } - accessId = string(secret.Data["accessId"]) - accessKey = string(secret.Data["accessKey"]) - return accessId, accessKey, nil + return destPath } diff --git a/make/deps.mk b/make/deps.mk index ee9c53846..a6cedb1e4 100644 --- a/make/deps.mk +++ b/make/deps.mk @@ -39,7 +39,7 @@ install-delve: ## Install delve, a debugger for the Go programming language. 
Mor go install github.com/go-delve/delve/cmd/dlv@master .PHONY: tools -tools: kustomize controller-gen envtest install-delve ## Download all tools +tools: kustomize controller-gen envtest ## Download all tools .PHONY: init-generator init-generator: ## Install generator tools diff --git a/make/development.mk b/make/development.mk index 7f6b00e3b..e66f58d29 100644 --- a/make/development.mk +++ b/make/development.mk @@ -35,6 +35,11 @@ test-all: manifests generate fmt vet envtest ## Run all tests including long-run go run github.com/onsi/ginkgo/v2/ginkgo -r --covermode=atomic --coverprofile=cover.profile --cpuprofile=cpu.profile --memprofile=mem.profile --cover \ --output-dir=testreports --keep-going --json-report=report.json --label-filter='$(CASE_LABEL_FILTERS)' --skip-package './distribution' +.PHONY: test-webhooks +test-webhooks: ## Test the webhooks + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" \ + go run github.com/onsi/ginkgo/v2/ginkgo ./api/... + REPORT_PORT ?= 8480 .PHONY: unit-coverage