diff --git a/.github/workflows/release-charts.yml b/.github/workflows/release-charts.yml index 4952b52b0..6d030f9d8 100644 --- a/.github/workflows/release-charts.yml +++ b/.github/workflows/release-charts.yml @@ -24,11 +24,12 @@ jobs: git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Run chart-releaser - uses: powerfooI/chart-releaser-action@v1.6.1 + uses: powerfooI/chart-releaser-action@v1.6.2 env: CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" with: mark_as_latest: false skip_existing: true pages_branch: master - pages_index_path: docsite/static/index.yaml \ No newline at end of file + pages_index_path: docsite/static/index.yaml + pr: true \ No newline at end of file diff --git a/.github/workflows/release-obproxy.yml b/.github/workflows/release-obproxy.yml index e0ec1b27b..b61321c12 100644 --- a/.github/workflows/release-obproxy.yml +++ b/.github/workflows/release-obproxy.yml @@ -38,6 +38,6 @@ jobs: platforms: linux/amd64,linux/arm64 file: ./distribution/obproxy/Dockerfile push: true - tags: ${{ vars.DOCKER_PUSH_BASE }}/obproxy:${{ steps.set_version_vars.outputs.version }} + tags: ${{ vars.DOCKER_PUSH_BASE }}/obproxy-ce:${{ steps.set_version_vars.outputs.version }} build-args: | VERSION=${{ steps.set_version_vars.outputs.version }} diff --git a/README-CN.md b/README-CN.md index f41dd9d88..37716e72e 100644 --- a/README-CN.md +++ b/README-CN.md @@ -130,7 +130,7 @@ mysql -h{POD_IP} -P2881 -uroot -proot_password oceanbase -A -c ``` helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update ob-operator -helm install oceanbase-dashboard ob-operator/oceanbase-dashboard --version=0.2.0 +helm install oceanbase-dashboard ob-operator/oceanbase-dashboard ``` ![oceanbase-dashboard-install](./docsite/static/img/oceanbase-dashboard-install.jpg) @@ -147,6 +147,7 @@ kubectl get svc oceanbase-dashboard-oceanbase-dashboard 使用 admin 账号和查看到的密码登录。 ![oceanbase-dashboard-overview](./docsite/static/img/oceanbase-dashboard-overview.jpg) +![oceanbase-dashboard-topology](./docsite/static/img/oceanbase-dashboard-topology.jpg) ## 项目架构 diff --git a/README.md b/README.md index db72f4abd..b7d14306b 100644 --- a/README.md +++ b/README.md @@ -129,7 +129,7 @@ Deploy OceanBase Dashboard is pretty simple, just run the following commands ``` helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update ob-operator -helm install oceanbase-dashboard ob-operator/oceanbase-dashboard --version=0.2.0 +helm install oceanbase-dashboard ob-operator/oceanbase-dashboard ``` ![oceanbase-dashboard-install](./docsite/static/img/oceanbase-dashboard-install.jpg) @@ -146,6 +146,7 @@ kubectl get svc oceanbase-dashboard-oceanbase-dashboard Login with admin user and password ![oceanbase-dashboard-overview](./docsite/static/img/oceanbase-dashboard-overview.jpg) +![oceanbase-dashboard-topology](./docsite/static/img/oceanbase-dashboard-topology.jpg) ## Project Architecture diff --git a/api/v1alpha1/obcluster_webhook.go b/api/v1alpha1/obcluster_webhook.go index ff5d9885f..3053b4d5e 100644 --- a/api/v1alpha1/obcluster_webhook.go +++ b/api/v1alpha1/obcluster_webhook.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" apitypes "github.com/oceanbase/ob-operator/api/types" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" ) @@ -61,7 +62,7 @@ func (r *OBCluster) Default() { parameterMap := make(map[string]apitypes.Parameter, 0) memorySize, ok := 
r.Spec.OBServerTemplate.Resource.Memory.AsInt64() if ok { - memoryLimit := fmt.Sprintf("%dM", memorySize*oceanbaseconst.DefaultMemoryLimitPercent/100/oceanbaseconst.MegaConverter) + memoryLimit := fmt.Sprintf("%dM", memorySize*int64(obcfg.GetConfig().Resource.DefaultMemoryLimitPercent)/100/oceanbaseconst.MegaConverter) parameterMap["memory_limit"] = apitypes.Parameter{ Name: "memory_limit", Value: memoryLimit, @@ -71,12 +72,12 @@ func (r *OBCluster) Default() { } datafileDiskSize, ok := r.Spec.OBServerTemplate.Storage.DataStorage.Size.AsInt64() if ok { - datafileMaxSize := fmt.Sprintf("%dG", datafileDiskSize*oceanbaseconst.DefaultDiskUsePercent/oceanbaseconst.GigaConverter/100) + datafileMaxSize := fmt.Sprintf("%dG", datafileDiskSize*int64(obcfg.GetConfig().Resource.DefaultDiskUsePercent)/oceanbaseconst.GigaConverter/100) parameterMap["datafile_maxsize"] = apitypes.Parameter{ Name: "datafile_maxsize", Value: datafileMaxSize, } - datafileNextSize := fmt.Sprintf("%dG", datafileDiskSize*oceanbaseconst.DefaultDiskExpandPercent/oceanbaseconst.GigaConverter/100) + datafileNextSize := fmt.Sprintf("%dG", datafileDiskSize*int64(obcfg.GetConfig().Resource.DefaultDiskExpandPercent)/oceanbaseconst.GigaConverter/100) parameterMap["datafile_next"] = apitypes.Parameter{ Name: "datafile_next", Value: datafileNextSize, @@ -92,7 +93,7 @@ func (r *OBCluster) Default() { logSize, ok := r.Spec.OBServerTemplate.Storage.LogStorage.Size.AsInt64() if ok { // observer has 4 types of logs and each logfile is limited to 256M; taking wf logs into account, a maximum of 2G will be occupied per max_syslog_file_count - maxSysLogFileCount = logSize * oceanbaseconst.DefaultLogPercent / oceanbaseconst.GigaConverter / 100 / 2 + maxSysLogFileCount = logSize * int64(obcfg.GetConfig().Resource.DefaultLogPercent) / oceanbaseconst.GigaConverter / 100 / 2 } parameterMap["max_syslog_file_count"] = apitypes.Parameter{ Name: "max_syslog_file_count", @@ -275,18 +276,17 @@ func (r *OBCluster) validateMutation() error { } // Validate disk size - if r.Spec.OBServerTemplate.Storage.DataStorage.Size.AsApproximateFloat64() < oceanbaseconst.MinDataDiskSize.AsApproximateFloat64() { + if r.Spec.OBServerTemplate.Storage.DataStorage.Size.Cmp(resource.MustParse(obcfg.GetConfig().Resource.MinDataDiskSize)) < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("storage").Child("dataStorage").Child("size"), r.Spec.OBServerTemplate.Storage.DataStorage.Size.String(), "The minimum data storage size of OBCluster is "+oceanbaseconst.MinDataDiskSize.String())) } - if r.Spec.OBServerTemplate.Storage.RedoLogStorage.Size.AsApproximateFloat64() < oceanbaseconst.MinRedoLogDiskSize.AsApproximateFloat64() { + if r.Spec.OBServerTemplate.Storage.RedoLogStorage.Size.Cmp(resource.MustParse(obcfg.GetConfig().Resource.MinRedoLogDiskSize)) < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("storage").Child("redoLogStorage").Child("size"), r.Spec.OBServerTemplate.Storage.RedoLogStorage.Size.String(), "The minimum redo log storage size of OBCluster is "+oceanbaseconst.MinRedoLogDiskSize.String())) } - if r.Spec.OBServerTemplate.Storage.LogStorage.Size.AsApproximateFloat64() < oceanbaseconst.MinLogDiskSize.AsApproximateFloat64() { + if r.Spec.OBServerTemplate.Storage.LogStorage.Size.Cmp(resource.MustParse(obcfg.GetConfig().Resource.MinLogDiskSize)) < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("storage").Child("logStorage").Child("size"),
r.Spec.OBServerTemplate.Storage.LogStorage.Size.String(), "The minimum log storage size of OBCluster is "+oceanbaseconst.MinLogDiskSize.String())) } - // Validate memory size - if r.Spec.OBServerTemplate.Resource.Memory.AsApproximateFloat64() < oceanbaseconst.MinMemorySize.AsApproximateFloat64() { + if r.Spec.OBServerTemplate.Resource.Memory.Cmp(resource.MustParse(obcfg.GetConfig().Resource.MinMemorySize)) < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("observer").Child("resource").Child("memory"), r.Spec.OBServerTemplate.Resource.Memory.String(), "The minimum memory size of OBCluster is "+oceanbaseconst.MinMemorySize.String())) } diff --git a/charts/ob-operator/templates/NOTES.txt b/charts/ob-operator/templates/NOTES.txt new file mode 100644 index 000000000..923ddfc12 --- /dev/null +++ b/charts/ob-operator/templates/NOTES.txt @@ -0,0 +1,33 @@ + _ _ + ___ | |__ ___ _ __ ___ _ __ __ _| |_ ___ _ __ + / _ \| '_ \ _____ / _ \| '_ \ / _ \ '__/ _` | __/ _ \| '__| +| (_) | |_) |_____| (_) | |_) | __/ | | (_| | || (_) | | + \___/|_.__/ \___/| .__/ \___|_| \__,_|\__\___/|_| + |_| + +Welcome to ob-operator! We are so happy to see you here! Once ob-operator is installed, you can explore OceanBase database on your Kubernetes cluster with ease. + +The following steps will guide you through the OceanBase database deployment: + +1. Quick Start - Deploy a single-node OceanBase database for testing + + https://oceanbase.github.io/ob-operator/docs/manual/quick-start-of-ob-operator + +2. Advanced - Create an OceanBase database with customized configurations + + https://oceanbase.github.io/ob-operator/docs/manual/ob-operator-user-guide/cluster-management-of-ob-operator/create-cluster + +3. Tenants - Create and manage tenants in OceanBase database + + https://oceanbase.github.io/ob-operator/docs/manual/ob-operator-user-guide/tenant-management-of-ob-operator/tenant-management-intro + +4. High availability - Enable high availability for OceanBase on K8s + + https://oceanbase.github.io/ob-operator/docs/manual/ob-operator-user-guide/high-availability/high-availability-intro + +5. Get help from the community + + Feel free to ask questions or report issues on GitHub: https://github.com/oceanbase/ob-operator/issues + Other ways to get help: https://oceanbase.github.io/ob-operator/#getting-help + +For more information, please visit our website: https://oceanbase.github.io/ob-operator diff --git a/charts/oceanbase-cluster/templates/NOTES.txt b/charts/oceanbase-cluster/templates/NOTES.txt index ef0f0656d..b4cfad1f2 100644 --- a/charts/oceanbase-cluster/templates/NOTES.txt +++ b/charts/oceanbase-cluster/templates/NOTES.txt @@ -1,3 +1,9 @@ + ___ ____ ____ _ _ + / _ \ ___ ___ __ _ _ __ | __ ) __ _ ___ ___ / ___| |_ _ ___| |_ ___ _ __ +| | | |/ __/ _ \/ _` | '_ \| _ \ / _` / __|/ _ \ | | | | | | / __| __/ _ \ '__| +| |_| | (_| __/ (_| | | | | |_) | (_| \__ \ __/ | |___| | |_| \__ \ || __/ | + \___/ \___\___|\__,_|_| |_|____/ \__,_|___/\___| \____|_|\__,_|___/\__\___|_| + Welcome to OceanBase Cluster! After installing the OBCluster chart, you need to wait for the cluster to finish bootstrapping. Bootstrapping takes approximately 2~3 minutes, which may vary depending on the machine. diff --git a/charts/oceanbase-dashboard/Chart.yaml b/charts/oceanbase-dashboard/Chart.yaml index 570a49e1a..cef1b1e74 100644 --- a/charts/oceanbase-dashboard/Chart.yaml +++ b/charts/oceanbase-dashboard/Chart.yaml @@ -15,10 +15,10 @@ type: application # This is the chart version.
This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.2.0 +version: 0.2.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "0.2.0" +appVersion: "0.2.1" diff --git a/charts/oceanbase-dashboard/templates/NOTES.txt b/charts/oceanbase-dashboard/templates/NOTES.txt index d863a9a23..d58b89a07 100644 --- a/charts/oceanbase-dashboard/templates/NOTES.txt +++ b/charts/oceanbase-dashboard/templates/NOTES.txt @@ -1,4 +1,16 @@ -Welcome to OceanBase dashboard + ___ ____ + / _ \ ___ ___ __ _ _ __ | __ ) __ _ ___ ___ +| | | |/ __/ _ \/ _` | '_ \| _ \ / _` / __|/ _ \ +| |_| | (_| __/ (_| | | | | |_) | (_| \__ \ __/ + \___/ \___\___|\__,_|_| |_|____/ \__,_|___/\___| + + ____ _ _ _ +| _ \ __ _ ___| |__ | |__ ___ __ _ _ __ __| | +| | | |/ _` / __| '_ \| '_ \ / _ \ / _` | '__/ _` | +| |_| | (_| \__ \ | | | |_) | (_) | (_| | | | (_| | +|____/ \__,_|___/_| |_|_.__/ \___/ \__,_|_| \__,_| + +Welcome to OceanBase dashboard! 1. After installing the dashboard chart, you can use `port-forward` to expose the dashboard outside like: diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 085f807d7..30cae0957 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -18,11 +18,11 @@ package main import ( "context" - "flag" "os" //+kubebuilder:scaffold:imports + "github.com/spf13/pflag" "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -33,9 +33,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" "github.com/oceanbase/ob-operator/internal/controller" "github.com/oceanbase/ob-operator/internal/controller/config" "github.com/oceanbase/ob-operator/internal/telemetry" + "github.com/oceanbase/ob-operator/pkg/coordinator" ) var ( @@ -66,15 +68,15 @@ func main() { var enableLeaderElection bool var probeAddr string var logVerbosity int - flag.StringVar(&namespace, "namespace", "", "The namespace to run oceanbase, default value is empty means all.") - flag.StringVar(&managerNamespace, "manager-namespace", "oceanbase-system", "The namespace to run manager tools.") - flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") - flag.BoolVar(&enableLeaderElection, "leader-elect", false, + pflag.StringVar(&namespace, "namespace", "", "The namespace to run oceanbase, default value is empty means all.") + pflag.StringVar(&managerNamespace, "manager-namespace", "oceanbase-system", "The namespace to run manager tools.") + pflag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + pflag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + pflag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") - flag.IntVar(&logVerbosity, "log-verbosity", 0, "Log verbosity level, 0 is info, 1 is debug, 2 is trace") - flag.Parse() + pflag.IntVar(&logVerbosity, "log-verbosity", 0, "Log verbosity level, 0 is info, 1 is debug, 2 is trace") + pflag.Parse() opts := zap.Options{ Development: logVerbosity > 0, @@ -86,6 +88,10 @@ func main() { }, } + cfg := obcfg.GetConfig() + coordinator.SetMaxRetryTimes(cfg.Time.TaskMaxRetryTimes) + coordinator.SetRetryBackoffThreshold(cfg.Time.TaskRetryBackoffThreshold) + ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ diff --git a/docsite/docs/developer/deploy-locally.md b/docsite/docs/developer/deploy-locally.md index c00be9f81..4fc245903 100644 --- a/docsite/docs/developer/deploy-locally.md +++ b/docsite/docs/developer/deploy-locally.md @@ -40,14 +40,14 @@ Tips: Perform `minikube dashboard` to open kubernetes dashboard, everything in t ob-operator depends on `cert-manager` to enable TLS functionalities, so we should install it first. ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.1_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/cert-manager.yaml ``` ### 4. Install ob-operator For robustness, default memory limit of ob-operator container is set to `1Gi` which is too large for us developing locally. We recommend fetching the manifests to local and configure it. wget tool could be useful here, while opening the URL and copying the contents to local file is more straight. -https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.1_release/deploy/operator.yaml +https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml Search the pattern `/manager`, find the target container, configure the memory limit to `400Mi` and cpu limit to `400m`. diff --git a/docsite/docs/manual/200.quick-start-of-ob-operator.md b/docsite/docs/manual/200.quick-start-of-ob-operator.md index 9e23c5f40..4e43e36e4 100644 --- a/docsite/docs/manual/200.quick-start-of-ob-operator.md +++ b/docsite/docs/manual/200.quick-start-of-ob-operator.md @@ -21,7 +21,7 @@ Run the following command to deploy ob-operator in the Kubernetes cluster: - Deploy the stable version of ob-operator ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml ``` - Deploy the developing version of ob-operator @@ -61,7 +61,7 @@ Perform the following steps to deploy an OceanBase cluster in the Kubernetes clu Run the following command to deploy an OceanBase cluster in the Kubernetes cluster: ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/quickstart/obcluster.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/quickstart/obcluster.yaml ``` In general, it takes about 2 minutes to create a cluster. 
Run the following command to check the cluster status: diff --git a/docsite/docs/manual/300.deploy-ob-operator.md b/docsite/docs/manual/300.deploy-ob-operator.md index 6e3ec57e0..5f58de457 100644 --- a/docsite/docs/manual/300.deploy-ob-operator.md +++ b/docsite/docs/manual/300.deploy-ob-operator.md @@ -33,7 +33,7 @@ You can deploy ob-operator by using the configuration file for the stable or dev * Deploy the stable version of ob-operator ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml ``` * Deploy the developing version of ob-operator diff --git a/docsite/docs/manual/400.ob-operator-upgrade.md b/docsite/docs/manual/400.ob-operator-upgrade.md index 003060d5e..8ada5a656 100644 --- a/docsite/docs/manual/400.ob-operator-upgrade.md +++ b/docsite/docs/manual/400.ob-operator-upgrade.md @@ -17,7 +17,7 @@ If you upgrade ob-operator by using configuration files, you only need to reappl - Deploy the stable version of ob-operator ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml ``` - Deploy the developing version of ob-operator diff --git a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md index 4cfc0e5a7..ae755b8fa 100644 --- a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md +++ b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md @@ -16,7 +16,7 @@ Before you create a tenant, make sure the following conditions are met: ## Create a tenant by using the configuration file -You can create a tenant by using the configuration file of the tenant. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.1.0_release/deploy/tenant.yaml). +You can create a tenant by using the configuration file of the tenant. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.0_release/deploy/tenant.yaml). Run the following command to create a tenant. This command creates an OceanBase Database tenant with custom resources in the current Kubernetes cluster. diff --git a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md index 0c62edbeb..79869124b 100644 --- a/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md +++ b/docsite/docs/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md @@ -8,7 +8,7 @@ This topic describes how to use ob-operator to delete a tenant from a Kubernetes ## Procedure -You can delete the specified tenant resources from the cluster by using the configuration file `tenant.yaml`. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.1.0_release/deploy/tenant.yaml). 
+You can delete the specified tenant resources from the cluster by using the configuration file `tenant.yaml`. For more information about the configuration file, visit [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.0_release/deploy/tenant.yaml). Run the following command to delete a tenant. This command deletes an OceanBase Database tenant with custom resources in the current Kubernetes cluster. diff --git a/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md b/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md index 6b1bc0a9c..4351cc0ae 100644 --- a/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md +++ b/docsite/docs/manual/500.ob-operator-user-guide/300.high-availability/100.high-availability-intro.md @@ -6,6 +6,6 @@ sidebar_position: 1 ob-operator ensures the high availability of data by using the following features of OceanBase Database. -* Node fault recovery. The distributed architecture of OceanBase Database allows you to restore the service when a minority of nodes fail. By relying on certain network plugins, you can even restore the service from majority nodes failure. For more information, see [Restore service from faults](300.disaster-recovery-of-ob-operator.md). +* Node fault recovery. The distributed architecture of OceanBase Database allows you to restore the service when a minority of nodes fail. By relying on certain network plugins, you can even recover the service when a majority of nodes fail. For more information, see [Recover from node failure](300.disaster-recovery-of-ob-operator.md). * Backup and restore of tenant data. The backup and restore feature of OceanBase Database allows you to back up tenant data to different storage media to ensure data safety. For more information, see [Back up a tenant](400.tenant-backup-of-ob-operator.md). * Primary and standby tenants. OceanBase Database allows you to create a standby tenant for the primary tenant. When a fault occurs to the primary tenant, you can quickly switch your business to the standby tenant to reduce the business interruption. For more information, see [Physical standby database](600.standby-tenant-of-ob-operator.md).
diff --git a/docsite/docs/manual/900.appendix/100.example.md b/docsite/docs/manual/900.appendix/100.example.md index dd7cf6028..231a1fca3 100644 --- a/docsite/docs/manual/900.appendix/100.example.md +++ b/docsite/docs/manual/900.appendix/100.example.md @@ -27,7 +27,7 @@ In this example, the following components are deployed: Create a namespace: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/namespace.yaml ``` View the created namespace: @@ -46,7 +46,7 @@ oceanbase Active 98s Create secrets for the cluster and tenants: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/secret.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/secret.yaml ``` View the created secrets: @@ -73,7 +73,7 @@ ob-configserver allows you to register, store, and query metadata of the RootSer Run the following command to deploy ob-configserver and create the corresponding service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/configserver.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/configserver.yaml ``` Check the pod status: @@ -101,7 +101,7 @@ When you deploy an OceanBase cluster, add environment variables and set the syst Deploy the OceanBase cluster: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/obcluster.yaml ``` Run the following command to query the status of the OceanBase cluster until the status becomes `running`: @@ -121,7 +121,7 @@ You can start ODP by using ob-configserver or specifying the RS list. To maximiz Run the following command to deploy ODP and create the ODP service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/obproxy.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/obproxy.yaml ``` When you query the pod status of ODP, you can see two ODP pods. 
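Once both ODP pods are Running, it is worth sanity-checking the connection through the ODP service before moving on to tenants. A minimal sketch: `${obproxy-service-address}` is the documentation placeholder this walkthrough uses (substitute the service address from `kubectl get svc`), and `root@sys#metadb` is the sys-tenant account of the `metadb` cluster in this example:

```shell
# Connect to the sys tenant of the metadb cluster through ODP on port 2883.
# ${obproxy-service-address} is a placeholder from the docs, not a shell variable.
mysql -h${obproxy-service-address} -P2883 -uroot@sys#metadb -p
```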
@@ -165,7 +165,7 @@ You can create a dedicated tenant for each type of business for better resource Run the following command to create a tenant: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/tenant.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/tenant.yaml ``` Run the following command to query the status of the tenant until the status becomes `running`: @@ -204,7 +204,7 @@ create database ocp_monitordb; Run the following command to deploy the application: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/ocp.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/ocp.yaml ``` After the deployment process is completed, run the following command to view the application status: @@ -237,7 +237,7 @@ When you deploy the OceanBase cluster, an OBAgent sidecar container is created i Run the following command to deploy Prometheus: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/prometheus.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/prometheus.yaml ``` Run the following command to view the deployment status: @@ -259,7 +259,7 @@ Grafana displays the metrics of OceanBase Database by using Prometheus as a data Run the following command to deploy Grafana: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/grafana.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/grafana.yaml ``` Run the following command to view the deployment status: @@ -285,4 +285,4 @@ This topic describes how to deploy OceanBase Database and related components suc ## Note -You can find all configuration files used in this topic in the [webapp](https://github.com/oceanbase/ob-operator/tree/2.1.0_release/example/webapp) directory. +You can find all configuration files used in this topic in the [webapp](https://github.com/oceanbase/ob-operator/tree/2.2.0_release/example/webapp) directory. diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md index c00be9f81..4fc245903 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/developer/deploy-locally.md @@ -40,14 +40,14 @@ Tips: Perform `minikube dashboard` to open kubernetes dashboard, everything in t ob-operator depends on `cert-manager` to enable TLS functionalities, so we should install it first. ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.1_release/deploy/cert-manager.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/cert-manager.yaml ``` ### 4. Install ob-operator For robustness, the default memory limit of the ob-operator container is set to `1Gi`, which is too large for local development. We recommend fetching the manifests locally and configuring them. The wget tool can be useful here, though opening the URL and copying the contents to a local file is more straightforward.
-https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.1_release/deploy/operator.yaml +https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml Search the pattern `/manager`, find the target container, configure the memory limit to `400Mi` and cpu limit to `400m`. diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md index 49f5a84e4..0ed1d8c68 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/200.quick-start-of-ob-operator.md @@ -21,7 +21,7 @@ sidebar_position: 2 - 稳定版本 ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml ``` - 开发版本 @@ -61,7 +61,7 @@ oceanbase-controller-manager-86cfc8f7bf-4hfnj 2/2 Running 0 1m 使用以下命令在 Kubernetes 集群上部署 OceanBase 集群: ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/quickstart/obcluster.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/quickstart/obcluster.yaml ``` 集群创建通常需要约 2 分钟。执行以下命令检查集群状态: diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md index 5d686aa48..3b7eca116 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/300.deploy-ob-operator.md @@ -32,7 +32,7 @@ helm install ob-operator ob-operator/ob-operator --namespace=oceanbase-system -- * 稳定版本 ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml ``` * 开发版本 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md index b41953402..c253f648c 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/400.ob-operator-upgrade.md @@ -17,7 +17,7 @@ sidebar_position: 4 - 稳定版本 ```shell - kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/deploy/operator.yaml + kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/deploy/operator.yaml ``` - 开发版本 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md index 5b32725a3..6d851b664 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md +++ 
b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/100.create-tenant.md @@ -16,7 +16,7 @@ sidebar_position: 2 ## 使用配置文件创建租户 -通过应用租户配置文件创建租户。配置文件内容可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.1.0_release/deploy/tenant.yaml) 。 +通过应用租户配置文件创建租户。配置文件内容可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.0_release/deploy/tenant.yaml) 。 创建租户的命令如下,该命令会在当前 Kubernetes 集群中创建一个 OBTenant 租户的资源。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md index 74f86ac54..340d47cd7 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/500.ob-operator-user-guide/200.tenant-management-of-ob-operator/300.delete-tenant-of-ob-operator.md @@ -8,7 +8,7 @@ sidebar_position: 4 ## 具体操作 -通过配置文件 tenant.yaml 在集群中删除指定的租户资源。配置文件可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.1.0_release/deploy/tenant.yaml)。 +通过配置文件 tenant.yaml 在集群中删除指定的租户资源。配置文件可参考 [GitHub](https://github.com/oceanbase/ob-operator/blob/2.2.0_release/deploy/tenant.yaml)。 删除租户的命令如下,该命令会在当前 Kubernetes 集群中删除对应租户的 OBTenant 资源。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md index b2433fad2..bad81605a 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-docs/current/manual/900.appendix/100.example.md @@ -26,7 +26,7 @@ sidebar_position: 1 创建 namespace。 ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/namespace.yaml ``` 使用以下命令查看创建的 namespace: @@ -45,7 +45,7 @@ oceanbase Active 98s 创建集群和租户的 secret: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/secret.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/secret.yaml ``` 通过以下命令查看创建的 secret: @@ -72,7 +72,7 @@ ob-configserver 是提供 OceanBase rootservice 信息注册和查询的服务 使用如下命令部署 ob-configserver 以及创建对应的 service: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/configserver.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/configserver.yaml ``` 检查 pod 状态: @@ -100,7 +100,7 @@ svc-ob-configserver NodePort 10.96.3.39 8080:30080/TCP 98s 使用如下命令部署 OceanBase 集群: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/obcluster.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/obcluster.yaml ``` 轮询使用如下命令检查 obcluster 状态,直到集群变成 running 状态。 @@ -120,7 +120,7 @@ ObProxy 支持使用 ob-configserver 或者直接指定 rs_list 的形式启动 使用如下命令部署 ObProxy 以及创建 service: ```shell 
-kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/obproxy.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/obproxy.yaml ``` 查看 ObProxy 的 pod 状态,会有两个 obproxy 的 pod。 @@ -164,7 +164,7 @@ mysql -h${obproxy-service-address} -P2883 -uroot@sys#metadb -p 使用如下命令创建租户: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/tenant.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/tenant.yaml ``` 创建后轮询租户的资源状态, 当变成 running 时表示租户已经创建完成了 @@ -203,7 +203,7 @@ create database ocp_monitordb; 使用如下命令部署应用: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/ocp.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/ocp.yaml ``` 部署成功之后,可以通过如下命令进行查看部署的状态: @@ -236,7 +236,7 @@ curl -L 'http://${service_ip}:${server_port}/api/v2/time' 使用如下命令部署 prometheus: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/prometheus.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/prometheus.yaml ``` 使用如下命令查看部署状态: @@ -258,7 +258,7 @@ grafana 可以使用 prometheus 作为数据源,进行 OceanBase 指标的展 使用如下命令部署 grafana: ```shell -kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.1.0_release/example/webapp/grafana.yaml +kubectl apply -f https://raw.githubusercontent.com/oceanbase/ob-operator/2.2.0_release/example/webapp/grafana.yaml ``` 使用如下命令查看部署状态: @@ -284,4 +284,4 @@ svc-grafana NodePort 10.96.2.145 3000:30030/TCP 2m ## 说明 -本文中的配置文件均可在 [webapp 配置文件](https://github.com/oceanbase/ob-operator/tree/2.1.0_release/example/webapp) 目录中找到。 +本文中的配置文件均可在 [webapp 配置文件](https://github.com/oceanbase/ob-operator/tree/2.2.0_release/example/webapp) 目录中找到。 diff --git a/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx b/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx index 2038b8be3..d7910f920 100644 --- a/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx +++ b/docsite/i18n/zh-Hans/docusaurus-plugin-content-pages/index.mdx @@ -133,7 +133,7 @@ mysql -h{POD_IP} -P2881 -uroot -proot_password oceanbase -A -c ``` helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update ob-operator -helm install oceanbase-dashboard ob-operator/oceanbase-dashboard --version=0.2.0 +helm install oceanbase-dashboard ob-operator/oceanbase-dashboard ``` ![oceanbase-dashboard-install](/img/oceanbase-dashboard-install.jpg) @@ -144,12 +144,13 @@ echo $(kubectl get -n default secret oceanbase-dashboard-user-credentials -o jso ``` 一个 NodePort 类型的 service 会默认创建,可以通过如下命令查看 service 的地址,然后在浏览器中打开。 ``` -kubectl get svc oceanbase-dashboard-ob-dashboard +kubectl get svc oceanbase-dashboard-oceanbase-dashboard ``` ![oceanbase-dashboard-service](/img/oceanbase-dashboard-service.jpg) 使用 admin 账号和查看到的密码登录。 ![oceanbase-dashboard-overview](/img/oceanbase-dashboard-overview.jpg) +![oceanbase-dashboard-topology](/img/oceanbase-dashboard-topology.jpg) ## 项目架构 @@ -203,8 +204,13 @@ ob-operator 使用 [kubebuilder](https://book.kubebuilder.io/introduction) 项 - [GitHub Issue](https://github.com/oceanbase/ob-operator/issues) - [官方论坛](https://ask.oceanbase.com/) - [Slack](https://oceanbase.slack.com/archives/C053PT371S7) -- 钉钉群([二维码](/img/dingtalk-operator-users.png)) - 微信群(请添加小助手微信,微信号:
OBCE666) +- 钉钉群([二维码](/img/dingtalk-operator-users.png)) + + +
+![钉钉群二维码](/img/dingtalk-operator-users.png) +
## 参与开发 diff --git a/docsite/src/pages/index.mdx b/docsite/src/pages/index.mdx index fa0cc3a48..efcdaba79 100644 --- a/docsite/src/pages/index.mdx +++ b/docsite/src/pages/index.mdx @@ -133,7 +133,7 @@ Deploy OceanBase Dashboard is pretty simple, just run the following commands ``` helm repo add ob-operator https://oceanbase.github.io/ob-operator/ helm repo update ob-operator -helm install oceanbase-dashboard ob-operator/oceanbase-dashboard --version=0.2.0 +helm install oceanbase-dashboard ob-operator/oceanbase-dashboard ``` ![oceanbase-dashboard-install](/img/oceanbase-dashboard-install.jpg) @@ -144,12 +144,13 @@ echo $(kubectl get -n default secret oceanbase-dashboard-user-credentials -o jso ``` A service of type NodePort is created by default, you can check the address and port and open it in browser ``` -kubectl get svc oceanbase-dashboard-ob-dashboard +kubectl get svc oceanbase-dashboard-oceanbase-dashboard ``` ![oceanbase-dashboard-service](/img/oceanbase-dashboard-service.jpg) Login with admin user and password ![oceanbase-dashboard-overview](/img/oceanbase-dashboard-overview.jpg) +![oceanbase-dashboard-topology](/img/oceanbase-dashboard-topology.jpg) ## Project Architecture @@ -202,8 +203,12 @@ If you encounter any issues while using ob-operator, please feel free to seek he - [GitHub Issue](https://github.com/oceanbase/ob-operator/issues) - [Official Forum](https://ask.oceanbase.com/) (in Chinese) - [Slack](https://oceanbase.slack.com/archives/C053PT371S7) -- DingTalk Group ([QRCode](/img/dingtalk-operator-users.png)) - WeChat Group (Add the assistant with WeChat ID: OBCE666) +- DingTalk Group ([QRCode](/img/dingtalk-operator-users.png)) + +
+![DingTalk Group QRCode](/img/dingtalk-operator-users.png) +
## Contributing diff --git a/docsite/static/img/oceanbase-dashboard-install.jpg b/docsite/static/img/oceanbase-dashboard-install.jpg index 35d5a9e42..1c78f97f1 100644 Binary files a/docsite/static/img/oceanbase-dashboard-install.jpg and b/docsite/static/img/oceanbase-dashboard-install.jpg differ diff --git a/docsite/static/img/oceanbase-dashboard-overview.jpg b/docsite/static/img/oceanbase-dashboard-overview.jpg index 4101fdd52..00d9f3783 100644 Binary files a/docsite/static/img/oceanbase-dashboard-overview.jpg and b/docsite/static/img/oceanbase-dashboard-overview.jpg differ diff --git a/docsite/static/img/oceanbase-dashboard-topology.jpg b/docsite/static/img/oceanbase-dashboard-topology.jpg new file mode 100644 index 000000000..a514fd868 Binary files /dev/null and b/docsite/static/img/oceanbase-dashboard-topology.jpg differ diff --git a/docsite/static/index.yaml b/docsite/static/index.yaml index ab48796c0..de09e8629 100644 --- a/docsite/static/index.yaml +++ b/docsite/static/index.yaml @@ -83,6 +83,16 @@ entries: - https://github.com/oceanbase/ob-operator/releases/download/oceanbase-cluster-4.2.1-sp.1-101010012023111012/oceanbase-cluster-4.2.1-sp.1-101010012023111012.tgz version: 4.2.1-sp.1-101010012023111012 oceanbase-dashboard: + - apiVersion: v2 + appVersion: 0.2.1 + created: "2024-04-25T12:43:20.075309091Z" + description: A Helm chart for OceanBase dashboard + digest: 7bb9af18db8cd3e7e712dfd57099a9fbb916f1d7aeb15c3f42d9d2f507440261 + name: oceanbase-dashboard + type: application + urls: + - https://github.com/oceanbase/ob-operator/releases/download/oceanbase-dashboard-0.2.1/oceanbase-dashboard-0.2.1.tgz + version: 0.2.1 - apiVersion: v2 appVersion: 0.2.0 created: "2024-04-12T09:11:29.482191694Z" @@ -103,4 +113,4 @@ entries: urls: - https://github.com/oceanbase/ob-operator/releases/download/oceanbase-dashboard-0.1.0/oceanbase-dashboard-0.1.0.tgz version: 0.1.0 -generated: "2024-04-16T06:22:47.327439739Z" \ No newline at end of file +generated: "2024-04-25T12:43:20.075350038Z" diff --git a/internal/config/operator/config_suite_test.go b/internal/config/operator/config_suite_test.go new file mode 100644 index 000000000..6a671370e --- /dev/null +++ b/internal/config/operator/config_suite_test.go @@ -0,0 +1,25 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package operator + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestOperator(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Operator Suite") +} diff --git a/internal/config/operator/default.go b/internal/config/operator/default.go new file mode 100644 index 000000000..5cdbbea05 --- /dev/null +++ b/internal/config/operator/default.go @@ -0,0 +1,77 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. 
+You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package operator + +import ( + "github.com/spf13/viper" + + oc "github.com/oceanbase/ob-operator/internal/const/oceanbase" + "github.com/oceanbase/ob-operator/pkg/database" +) + +var defaultConfigMap = map[string]any{ + "namespace": "", + "manager-namespace": "oceanbase-system", + "metrics-bind-address": ":8080", + "leader-elect": true, + "health-probe-bind-address": ":8081", + "log-verbosity": 0, + "disable-webhooks": false, + + "task.debug": false, + "task.poolSize": 10000, + + "telemetry.disabled": false, + "telemetry.debug": false, + "telemetry.host": "https://openwebapi.oceanbase.com", + + "database.connectionLRUCacheSize": database.DefaultLRUCacheSize, + + "resource.defaultDiskExpandPercent": oc.DefaultDiskExpandPercent, + "resource.defaultLogPercent": oc.DefaultLogPercent, + "resource.initialDataDiskUsePercent": oc.InitialDataDiskUsePercent, + "resource.defaultDiskUsePercent": oc.DefaultDiskUsePercent, + "resource.defaultMemoryLimitPercent": oc.DefaultMemoryLimitPercent, + "resource.defaultMemoryLimitSize": oc.DefaultMemoryLimitSize, + "resource.defaultDatafileMaxSize": oc.DefaultDatafileMaxSize, + "resource.defaultDatafileNextSize": oc.DefaultDatafileNextSize, + "resource.minMemorySize": oc.MinMemorySizeS, + "resource.minDataDiskSize": oc.MinDataDiskSizeS, + "resource.minRedoLogDiskSize": oc.MinRedoLogDiskSizeS, + "resource.minLogDiskSize": oc.MinLogDiskSizeS, + + "time.tenantOpRetryTimes": oc.TenantOpRetryTimes, + "time.tenantOpRetryGapSeconds": oc.TenantOpRetryGapSeconds, + "time.taskMaxRetryTimes": oc.TaskMaxRetryTimes, + "time.taskRetryBackoffThreshold": oc.TaskRetryBackoffThreshold, + "time.probeCheckPeriodSeconds": oc.ProbeCheckPeriodSeconds, + "time.probeCheckDelaySeconds": oc.ProbeCheckDelaySeconds, + "time.getConnectionMaxRetries": oc.GetConnectionMaxRetries, + "time.checkConnectionInterval": oc.CheckConnectionInterval, + "time.checkJobInterval": oc.CheckJobInterval, + "time.checkJobMaxRetries": oc.CheckJobMaxRetries, + "time.commonCheckInterval": oc.CommonCheckInterval, + "time.bootstrapTimeoutSeconds": oc.BootstrapTimeoutSeconds, + "time.localityChangeTimeoutSeconds": oc.LocalityChangeTimeoutSeconds, + "time.defaultStateWaitTimeout": oc.DefaultStateWaitTimeout, + "time.timeConsumingStateWaitTimeout": oc.TimeConsumingStateWaitTimeout, + "time.waitForJobTimeoutSeconds": oc.WaitForJobTimeoutSeconds, + "time.serverDeleteTimeoutSeconds": oc.ServerDeleteTimeoutSeconds, + "time.tolerateServerPodNotReadyMinutes": oc.TolerateServerPodNotReadyMinutes, +} + +func setDefaultConfigs(vp *viper.Viper) { + for k, v := range defaultConfigMap { + vp.SetDefault(k, v) + } +} diff --git a/internal/config/operator/new.go b/internal/config/operator/new.go new file mode 100644 index 000000000..551ac43c2 --- /dev/null +++ b/internal/config/operator/new.go @@ -0,0 +1,70 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package operator + +import ( + "flag" + "strings" + "sync" + + "github.com/spf13/pflag" + "github.com/spf13/viper" +) + +var ( + cfgOnce sync.Once + cfg *Config +) + +func newConfig() *Config { + v := viper.New() + v.AddConfigPath(".") + v.AddConfigPath("/etc/admin/oceanbase") + v.SetConfigName(".ob-operator") + v.SetConfigType("yaml") + + setDefaultConfigs(v) + + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + pflag.Parse() + _ = v.BindPFlags(pflag.CommandLine) + + v.AutomaticEnv() + v.SetEnvPrefix("OB_OPERATOR") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "")) + + config := &Config{} + if err := v.ReadInConfig(); err != nil { + if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + panic(err) + } + } + if err := v.Unmarshal(config); err != nil { + panic(err) + } + config.v = v + return config +} + +func GetConfig() *Config { + if cfg == nil { + cfgOnce.Do(func() { + cfg = newConfig() + }) + } + return cfg +} + +func (c *Config) Write() error { + return c.v.WriteConfigAs(".ob-operator.yaml") +} diff --git a/internal/config/operator/new_test.go b/internal/config/operator/new_test.go new file mode 100644 index 000000000..9bcaa5f54 --- /dev/null +++ b/internal/config/operator/new_test.go @@ -0,0 +1,115 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package operator + +import ( + "flag" + "os" + + "github.com/mitchellh/mapstructure" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Config", func() { + GinkgoHelper() + + Context("default", func() { + It("should return default config", func() { + output := Config{} + Expect(mapstructure.Decode(defaultConfigMap, &output)).To(Succeed()) + + got := newConfig() + Expect(got.Database.ConnectionLRUCacheSize).To(BeEquivalentTo(defaultConfigMap["database.connectionLRUCacheSize"])) + Expect(got.Resource.DefaultDiskExpandPercent).To(BeEquivalentTo(defaultConfigMap["resource.defaultDiskExpandPercent"])) + Expect(got.Resource.DefaultLogPercent).To(BeEquivalentTo(defaultConfigMap["resource.defaultLogPercent"])) + Expect(got.Resource.InitialDataDiskUsePercent).To(BeEquivalentTo(defaultConfigMap["resource.initialDataDiskUsePercent"])) + Expect(got.Resource.DefaultDiskUsePercent).To(BeEquivalentTo(defaultConfigMap["resource.defaultDiskUsePercent"])) + Expect(got.Resource.DefaultMemoryLimitPercent).To(BeEquivalentTo(defaultConfigMap["resource.defaultMemoryLimitPercent"])) + Expect(got.Resource.DefaultMemoryLimitSize).To(BeEquivalentTo(defaultConfigMap["resource.defaultMemoryLimitSize"])) + Expect(got.Resource.DefaultDatafileMaxSize).To(BeEquivalentTo(defaultConfigMap["resource.defaultDatafileMaxSize"])) + Expect(got.Resource.DefaultDatafileNextSize).To(BeEquivalentTo(defaultConfigMap["resource.defaultDatafileNextSize"])) + Expect(got.Resource.MinMemorySize).To(BeEquivalentTo(defaultConfigMap["resource.minMemorySize"])) + Expect(got.Resource.MinDataDiskSize).To(BeEquivalentTo(defaultConfigMap["resource.minDataDiskSize"])) + Expect(got.Resource.MinRedoLogDiskSize).To(BeEquivalentTo(defaultConfigMap["resource.minRedoLogDiskSize"])) + Expect(got.Resource.MinLogDiskSize).To(BeEquivalentTo(defaultConfigMap["resource.minLogDiskSize"])) + Expect(got.Time.TenantOpRetryTimes).To(BeEquivalentTo(defaultConfigMap["time.tenantOpRetryTimes"])) + Expect(got.Time.TenantOpRetryGapSeconds).To(BeEquivalentTo(defaultConfigMap["time.tenantOpRetryGapSeconds"])) + Expect(got.Time.TaskMaxRetryTimes).To(BeEquivalentTo(defaultConfigMap["time.taskMaxRetryTimes"])) + Expect(got.Time.TaskRetryBackoffThreshold).To(BeEquivalentTo(defaultConfigMap["time.taskRetryBackoffThreshold"])) + Expect(got.Time.ProbeCheckPeriodSeconds).To(BeEquivalentTo(defaultConfigMap["time.probeCheckPeriodSeconds"])) + Expect(got.Time.ProbeCheckDelaySeconds).To(BeEquivalentTo(defaultConfigMap["time.probeCheckDelaySeconds"])) + Expect(got.Time.GetConnectionMaxRetries).To(BeEquivalentTo(defaultConfigMap["time.getConnectionMaxRetries"])) + Expect(got.Time.CheckConnectionInterval).To(BeEquivalentTo(defaultConfigMap["time.checkConnectionInterval"])) + Expect(got.Time.CheckJobInterval).To(BeEquivalentTo(defaultConfigMap["time.checkJobInterval"])) + Expect(got.Time.CheckJobMaxRetries).To(BeEquivalentTo(defaultConfigMap["time.checkJobMaxRetries"])) + Expect(got.Time.CommonCheckInterval).To(BeEquivalentTo(defaultConfigMap["time.commonCheckInterval"])) + Expect(got.Time.BootstrapTimeoutSeconds).To(BeEquivalentTo(defaultConfigMap["time.bootstrapTimeoutSeconds"])) + Expect(got.Time.LocalityChangeTimeoutSeconds).To(BeEquivalentTo(defaultConfigMap["time.localityChangeTimeoutSeconds"])) + Expect(got.Time.DefaultStateWaitTimeout).To(BeEquivalentTo(defaultConfigMap["time.defaultStateWaitTimeout"])) + Expect(got.Time.TimeConsumingStateWaitTimeout).To(BeEquivalentTo(defaultConfigMap["time.timeConsumingStateWaitTimeout"])) + Expect(got.Time.WaitForJobTimeoutSeconds).To(BeEquivalentTo(defaultConfigMap["time.waitForJobTimeoutSeconds"])) + 
Expect(got.Time.ServerDeleteTimeoutSeconds).To(BeEquivalentTo(defaultConfigMap["time.serverDeleteTimeoutSeconds"])) + Expect(got.Telemetry.Disabled).To(BeEquivalentTo(defaultConfigMap["telemetry.disabled"])) + Expect(got.Telemetry.Debug).To(BeEquivalentTo(defaultConfigMap["telemetry.debug"])) + Expect(got.Telemetry.Host).To(BeEquivalentTo(defaultConfigMap["telemetry.host"])) + Expect(got.Task.Debug).To(BeEquivalentTo(defaultConfigMap["task.debug"])) + Expect(got.Task.PoolSize).To(BeEquivalentTo(defaultConfigMap["task.poolSize"])) + // Expect(got.Manager.DisableWebhooks).To(BeEquivalentTo(defaultConfigMap["manager.disableWebhooks"])) + // Expect(got.Manager.LogVerbosity).To(BeEquivalentTo(defaultConfigMap["manager.logVerbosity"])) + }) + }) + + Context("envVars", func() { + BeforeEach(func() { + os.Setenv("OB_OPERATOR_TASK_POOLSIZE", "9876") + os.Setenv("OB_OPERATOR_TIME_TASKMAXRETRYTIMES", "1234") + }) + AfterEach(func() { + os.Unsetenv("OB_OPERATOR_TASK_POOLSIZE") + os.Unsetenv("OB_OPERATOR_TIME_TASKMAXRETRYTIMES") + }) + It("should return config with envVars", func() { + Expect(os.Getenv("OB_OPERATOR_TASK_POOLSIZE")).To(Equal("9876")) + got := newConfig() + Expect(got.Task.PoolSize).To(Equal(9876)) + Expect(got.Time.TaskMaxRetryTimes).To(Equal(1234)) + }) + }) + + Context("flags", func() { + It("should return config with flags", func() { + var namespace string + var managerNamespace string + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + var logVerbosity int + flag.StringVar(&namespace, "namespace", "", "The namespace to run oceanbase, default value is empty means all.") + flag.StringVar(&managerNamespace, "manager-namespace", "oceanbase-system", "The namespace to run manager tools.") + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + flag.IntVar(&logVerbosity, "log-verbosity", 0, "Log verbosity level, 0 is info, 1 is debug, 2 is trace") + Expect(flag.CommandLine.Parse([]string{ + "--log-verbosity", "1", + })).To(Succeed()) + GinkgoLogr.Info("logVerbosity", "logVerbosity", logVerbosity) + + got := newConfig() + Expect(got.Manager.LogVerbosity).To(Equal(1)) + }) + }) +}) diff --git a/internal/config/operator/types.go b/internal/config/operator/types.go new file mode 100644 index 000000000..5174b1fda --- /dev/null +++ b/internal/config/operator/types.go @@ -0,0 +1,93 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+*/ + +package operator + +import "github.com/spf13/viper" + +type Config struct { + v *viper.Viper + + Manager Manager `mapstructure:",squash" yaml:"manager"` + Database Database `mapstructure:"database" yaml:"database"` + Task Task `mapstructure:"task" yaml:"task"` + Telemetry Telemetry `mapstructure:"telemetry" yaml:"telemetry"` + Time Time `mapstructure:"time" yaml:"time"` + Resource Resource `mapstructure:"resource" yaml:"resource"` +} + +type Manager struct { + Namespace string `mapstructure:"namespace" yaml:"namespace"` + ManagerNamespace string `mapstructure:"manager-namespace" yaml:"managerNamespace"` + MetricsAddr string `mapstructure:"metrics-bind-address" yaml:"metricsAddr"` + LeaderElect bool `mapstructure:"leader-elect" yaml:"enableElect"` + ProbeAddr string `mapstructure:"health-probe-bind-address" yaml:"probeAddr"` + LogVerbosity int `mapstructure:"log-verbosity" yaml:"logVerbosity"` + DisableWebhooks bool `mapstructure:"disable-webhooks" yaml:"disableWebhooks"` +} + +type Task struct { + Debug bool `mapstructure:"debug" yaml:"debug"` + PoolSize int `mapstructure:"poolSize" yaml:"poolSize"` +} + +type Telemetry struct { + Disabled bool `mapstructure:"disabled" yaml:"disabled"` + Debug bool `mapstructure:"debug" yaml:"debug"` + Host string `mapstructure:"host" yaml:"host"` +} + +type Database struct { + ConnectionLRUCacheSize int `mapstructure:"connectionLRUCacheSize" yaml:"connectionLRUCacheSize"` +} + +type Resource struct { + DefaultDiskExpandPercent int `mapstructure:"defaultDiskExpandPercent" yaml:"defaultDiskExpandPercent"` + DefaultLogPercent int `mapstructure:"defaultLogPercent" yaml:"defaultLogPercent"` + InitialDataDiskUsePercent int `mapstructure:"initialDataDiskUsePercent" yaml:"initialDataDiskUsePercent"` + DefaultDiskUsePercent int `mapstructure:"defaultDiskUsePercent" yaml:"defaultDiskUsePercent"` + DefaultMemoryLimitPercent int `mapstructure:"defaultMemoryLimitPercent" yaml:"defaultMemoryLimitPercent"` + + DefaultMemoryLimitSize string `mapstructure:"defaultMemoryLimitSize" yaml:"defaultMemoryLimitSize"` + DefaultDatafileMaxSize string `mapstructure:"defaultDatafileMaxSize" yaml:"defaultDatafileMaxSize"` + DefaultDatafileNextSize string `mapstructure:"defaultDatafileNextSize" yaml:"defaultDatafileNextSize"` + + MinMemorySize string `mapstructure:"minMemorySize" yaml:"minMemorySizeQ"` + MinDataDiskSize string `mapstructure:"minDataDiskSize" yaml:"minDataDiskSizeQ"` + MinRedoLogDiskSize string `mapstructure:"minRedoLogDiskSize" yaml:"minRedoLogDiskSizeQ"` + MinLogDiskSize string `mapstructure:"minLogDiskSize" yaml:"minLogDiskSizeQ"` +} + +type Time struct { + TenantOpRetryTimes int `mapstructure:"tenantOpRetryTimes" yaml:"tenantOpRetryTimes"` + TenantOpRetryGapSeconds int `mapstructure:"tenantOpRetryGapSeconds" yaml:"tenantOpRetryGapSeconds"` + + TaskMaxRetryTimes int `mapstructure:"taskMaxRetryTimes" yaml:"taskMaxRetryTimes"` + TaskRetryBackoffThreshold int `mapstructure:"taskRetryBackoffThreshold" yaml:"taskRetryBackoffThreshold"` + + ProbeCheckPeriodSeconds int `mapstructure:"probeCheckPeriodSeconds" yaml:"probeCheckPeriodSeconds"` + ProbeCheckDelaySeconds int `mapstructure:"probeCheckDelaySeconds" yaml:"probeCheckDelaySeconds"` + GetConnectionMaxRetries int `mapstructure:"getConnectionMaxRetries" yaml:"getConnectionMaxRetries"` + CheckConnectionInterval int `mapstructure:"checkConnectionInterval" yaml:"checkConnectionInterval"` + CheckJobInterval int `mapstructure:"checkJobInterval" yaml:"checkJobInterval"` + CheckJobMaxRetries int 
`mapstructure:"checkJobMaxRetries" yaml:"checkJobMaxRetries"` + CommonCheckInterval int `mapstructure:"commonCheckInterval" yaml:"commonCheckInterval"` + + BootstrapTimeoutSeconds int `mapstructure:"bootstrapTimeoutSeconds" yaml:"bootstrapTimeoutSeconds"` + LocalityChangeTimeoutSeconds int `mapstructure:"localityChangeTimeoutSeconds" yaml:"localityChangeTimeoutSeconds"` + DefaultStateWaitTimeout int `mapstructure:"defaultStateWaitTimeout" yaml:"defaultStateWaitTimeout"` + TimeConsumingStateWaitTimeout int `mapstructure:"timeConsumingStateWaitTimeout" yaml:"timeConsumingStateWaitTimeout"` + WaitForJobTimeoutSeconds int `mapstructure:"waitForJobTimeoutSeconds" yaml:"waitForJobTimeoutSeconds"` + ServerDeleteTimeoutSeconds int `mapstructure:"serverDeleteTimeoutSeconds" yaml:"serverDeleteTimeoutSeconds"` + + TolerateServerPodNotReadyMinutes int `mapstructure:"tolerateServerPodNotReadyMinutes" yaml:"tolerateServerPodNotReadyMinutes"` +} diff --git a/internal/const/oceanbase/resource.go b/internal/const/oceanbase/resource.go index 90c3e2279..d6400dd2a 100644 --- a/internal/const/oceanbase/resource.go +++ b/internal/const/oceanbase/resource.go @@ -20,8 +20,11 @@ const ( InitialDataDiskUsePercent = 20 DefaultDiskUsePercent = 95 DefaultMemoryLimitPercent = 90 - GigaConverter = 1 << 30 - MegaConverter = 1 << 20 +) + +const ( + GigaConverter = 1 << 30 + MegaConverter = 1 << 20 ) const ( @@ -30,9 +33,16 @@ const ( DefaultDatafileNextSize = "1G" ) +const ( + MinMemorySizeS = "8Gi" + MinDataDiskSizeS = "30Gi" + MinRedoLogDiskSizeS = "30Gi" + MinLogDiskSizeS = "10Gi" +) + var ( - MinMemorySize = resource.MustParse("8Gi") - MinDataDiskSize = resource.MustParse("30Gi") - MinRedoLogDiskSize = resource.MustParse("30Gi") - MinLogDiskSize = resource.MustParse("10Gi") + MinMemorySize = resource.MustParse(MinMemorySizeS) + MinDataDiskSize = resource.MustParse(MinDataDiskSizeS) + MinRedoLogDiskSize = resource.MustParse(MinRedoLogDiskSizeS) + MinLogDiskSize = resource.MustParse(MinLogDiskSizeS) ) diff --git a/internal/dashboard/handler/conn_handler.go b/internal/dashboard/handler/conn_handler.go index de45ede56..013aca73d 100644 --- a/internal/dashboard/handler/conn_handler.go +++ b/internal/dashboard/handler/conn_handler.go @@ -201,7 +201,7 @@ func CreateOBTenantConnTerminal(c *gin.Context) (*response.OBConnection, error) } // Select unit information from the oceanbase cluster - db, err := getSysClient(c, obcluster, oceanbaseconst.RootUser, oceanbaseconst.SysTenant, obtenant.Spec.Credentials.Root) + db, err := getSysClient(c, obcluster, oceanbaseconst.RootUser, oceanbaseconst.SysTenant, obcluster.Spec.UserSecrets.Root) if err != nil { return nil, httpErr.NewInternal(err.Error()) } diff --git a/internal/resource/obcluster/obcluster_task.go b/internal/resource/obcluster/obcluster_task.go index 082b1ccd3..1fcab8426 100644 --- a/internal/resource/obcluster/obcluster_task.go +++ b/internal/resource/obcluster/obcluster_task.go @@ -33,6 +33,7 @@ import ( apitypes "github.com/oceanbase/ob-operator/api/types" v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" obagentconst "github.com/oceanbase/ob-operator/internal/const/obagent" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" zonestatus "github.com/oceanbase/ob-operator/internal/const/status/obzone" @@ -54,7 +55,7 @@ func WaitOBZoneTopologyMatch(_ *OBClusterManager) tasktypes.TaskError { func WaitOBZoneDeleted(m *OBClusterManager) tasktypes.TaskError { waitSuccess := false 
- for i := 1; i < oceanbaseconst.ServerDeleteTimeoutSeconds; i++ { + for i := 1; i < obcfg.GetConfig().Time.ServerDeleteTimeoutSeconds; i++ { obcluster, err := m.getOBCluster() if err != nil { return errors.Wrap(err, "get obcluster failed") @@ -212,11 +213,11 @@ func Bootstrap(m *OBClusterManager) tasktypes.TaskError { return errors.Wrap(err, "no obzone belongs to this cluster") } var manager *operation.OceanbaseOperationManager - for i := 0; i < oceanbaseconst.GetConnectionMaxRetries; i++ { + for i := 0; i < obcfg.GetConfig().Time.GetConnectionMaxRetries; i++ { manager, err = m.getOceanbaseOperationManager() if err != nil || manager == nil { m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Get oceanbase operation manager failed") - time.Sleep(time.Second * oceanbaseconst.CheckConnectionInterval) + time.Sleep(time.Second * time.Duration(obcfg.GetConfig().Time.CheckConnectionInterval)) } else { m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Successfully got oceanbase operation manager") break @@ -393,7 +394,7 @@ func ValidateUpgradeInfo(m *OBClusterManager) tasktypes.TaskError { return false, errors.Wrap(err, "Failed to run validate job") } } - err = resourceutils.CheckJobWithTimeout(check, time.Second*oceanbaseconst.WaitForJobTimeoutSeconds) + err = resourceutils.CheckJobWithTimeout(check, time.Second*time.Duration(obcfg.GetConfig().Time.WaitForJobTimeoutSeconds)) if err != nil { return errors.Wrap(err, "Failed to run validate job") } @@ -535,7 +536,7 @@ func ModifySysTenantReplica(m *OBClusterManager) tasktypes.TaskError { if err != nil { return errors.Wrapf(err, "Failed to set sys locality to %s", locality) } - err = oceanbaseOperationManager.WaitTenantLocalityChangeFinished(oceanbaseconst.SysTenant, oceanbaseconst.LocalityChangeTimeoutSeconds) + err = oceanbaseOperationManager.WaitTenantLocalityChangeFinished(oceanbaseconst.SysTenant, obcfg.GetConfig().Time.LocalityChangeTimeoutSeconds) if err != nil { return errors.Wrapf(err, "Locality change to %s not finished after timeout", locality) } @@ -561,7 +562,7 @@ func ModifySysTenantReplica(m *OBClusterManager) tasktypes.TaskError { if err != nil { return errors.Wrapf(err, "Failed to set sys locality to %s", locality) } - err = oceanbaseOperationManager.WaitTenantLocalityChangeFinished(oceanbaseconst.SysTenant, oceanbaseconst.LocalityChangeTimeoutSeconds) + err = oceanbaseOperationManager.WaitTenantLocalityChangeFinished(oceanbaseconst.SysTenant, obcfg.GetConfig().Time.LocalityChangeTimeoutSeconds) if err != nil { return errors.Wrapf(err, "Locality change to %s not finished after timeout", locality) } @@ -791,7 +792,7 @@ outerLoop: } if len(podList.Items) == 0 { m.Logger.V(oceanbaseconst.LogLevelDebug).Info("No pod found for check image pull job") - time.Sleep(time.Second * oceanbaseconst.CheckJobInterval) + time.Sleep(time.Second * time.Duration(obcfg.GetConfig().Time.CheckJobInterval)) continue } pod := podList.Items[0] @@ -813,7 +814,7 @@ outerLoop: default: m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Container is waiting", "reason", containerStatus.State.Waiting.Reason, "message", containerStatus.State.Waiting.Message) } - time.Sleep(time.Second * oceanbaseconst.CheckJobInterval) + time.Sleep(time.Second * time.Duration(obcfg.GetConfig().Time.CheckJobInterval)) continue outerLoop } else if containerStatus.State.Running != nil || containerStatus.State.Terminated != nil { m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Container is running or terminated") @@ -832,66 +833,26 @@ outerLoop: func CheckClusterMode(m *OBClusterManager) 
tasktypes.TaskError { var err error modeAnnoVal, modeAnnoExist := resourceutils.GetAnnotationField(m.OBCluster, oceanbaseconst.AnnotationsMode) - if modeAnnoExist && modeAnnoVal == oceanbaseconst.ModeStandalone { - var backoffLimit int32 - var ttl int32 = 300 - jobName := "standalone-validate-" + rand.String(8) - standaloneValidateJob := &batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: m.OBCluster.Namespace, - OwnerReferences: []metav1.OwnerReference{{ - Kind: m.OBCluster.Kind, - APIVersion: m.OBCluster.APIVersion, - Name: m.OBCluster.Name, - UID: m.OBCluster.UID, - }}, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Name: "helper-validate-standalone", - Image: m.OBCluster.Spec.OBServerTemplate.Image, - Command: []string{"bash", "-c", "/home/admin/oceanbase/bin/oceanbase-helper standalone validate"}, - }}, - RestartPolicy: corev1.RestartPolicyNever, - }, - }, - BackoffLimit: &backoffLimit, - TTLSecondsAfterFinished: &ttl, - }, - } - m.Logger.V(oceanbaseconst.LogLevelDebug).Info("Create check version job", "job", jobName) - - err = m.Client.Create(m.Ctx, standaloneValidateJob) - if err != nil { - return errors.Wrap(err, "Create check version job") - } - - var jobObject *batchv1.Job - var maxCheckTimes = 600 - for i := 0; i < maxCheckTimes; i++ { - time.Sleep(time.Second * oceanbaseconst.CheckJobInterval) - jobObject, err = resourceutils.GetJob(m.Ctx, m.Client, m.OBCluster.Namespace, jobName) - if err != nil { - m.Logger.Error(err, "Failed to get job") - return err - } - if jobObject.Status.Succeeded == 0 && jobObject.Status.Failed == 0 { - m.Logger.V(oceanbaseconst.LogLevelDebug).Info("OBServer version check job is still running") - } else { - m.Logger.V(oceanbaseconst.LogLevelDebug).Info("OBServer version check job finished") - break - } - } - if jobObject.Status.Failed > 0 { - m.Logger.Info("Current image does not support standalone mode") - err := errors.New("Current image does not support standalone mode") - m.PrintErrEvent(err) - return err + if modeAnnoExist { + switch modeAnnoVal { + case oceanbaseconst.ModeStandalone: + _, _, err = resourceutils.RunJob(m.Ctx, m.Client, m.Logger, m.OBCluster.Namespace, + m.OBCluster.Name+"-standalone-validate", + m.OBCluster.Spec.OBServerTemplate.Image, + "/home/admin/oceanbase/bin/oceanbase-helper standalone validate", + ) + case oceanbaseconst.ModeService: + _, _, err = resourceutils.RunJob(m.Ctx, m.Client, m.Logger, m.OBCluster.Namespace, + m.OBCluster.Name+"-service-validate", + m.OBCluster.Spec.OBServerTemplate.Image, + "/home/admin/oceanbase/bin/oceanbase-helper service validate", + ) } } + if err != nil { + m.Logger.Info("Run cluster mode validate job failed", "error", err, "mode", modeAnnoVal) + return err + } return nil } @@ -969,27 +930,86 @@ func CheckMigration(m *OBClusterManager) tasktypes.TaskError { } func ScaleUpOBZones(m *OBClusterManager) tasktypes.TaskError { - return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenScaling, zonestatus.ScaleUp, oceanbaseconst.DefaultStateWaitTimeout)() + return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenScaling, zonestatus.ScaleUp, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func ExpandPVC(m *OBClusterManager) tasktypes.TaskError { - return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenExpandingPVC, zonestatus.ExpandPVC, oceanbaseconst.DefaultStateWaitTimeout)() + return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenExpandingPVC, zonestatus.ExpandPVC, 
obcfg.GetConfig().Time.DefaultStateWaitTimeout)()
 }
 
 func MountBackupVolume(m *OBClusterManager) tasktypes.TaskError {
-	return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenMountingBackupVolume, zonestatus.MountBackupVolume, oceanbaseconst.DefaultStateWaitTimeout)()
+	return m.modifyOBZonesAndCheckStatus(m.changeZonesWhenMountingBackupVolume, zonestatus.MountBackupVolume, obcfg.GetConfig().Time.DefaultStateWaitTimeout)()
 }
 
 func WaitOBZoneBootstrapReady(m *OBClusterManager) tasktypes.TaskError {
-	return m.generateWaitOBZoneStatusFunc(zonestatus.BootstrapReady, oceanbaseconst.DefaultStateWaitTimeout)()
+	return m.generateWaitOBZoneStatusFunc(zonestatus.BootstrapReady, obcfg.GetConfig().Time.DefaultStateWaitTimeout)()
 }
 
 func WaitOBZoneRunning(m *OBClusterManager) tasktypes.TaskError {
-	return m.generateWaitOBZoneStatusFunc(zonestatus.Running, oceanbaseconst.DefaultStateWaitTimeout)()
+	return m.generateWaitOBZoneStatusFunc(zonestatus.Running, obcfg.GetConfig().Time.DefaultStateWaitTimeout)()
 }
 
 func CheckEnvironment(m *OBClusterManager) tasktypes.TaskError {
-	_, exitCode, err := resourceutils.RunJob(m.Ctx, m.Client, m.Logger, m.OBCluster.Namespace, "check-fs", m.OBCluster.Spec.OBServerTemplate.Image, "/home/admin/oceanbase/bin/oceanbase-helper env-check storage "+oceanbaseconst.ClogPath)
+	volumeName := m.OBCluster.Name + "-check-clog-volume-" + rand.String(6)
+	claimName := m.OBCluster.Name + "-check-clog-claim-" + rand.String(6)
+	jobName := m.OBCluster.Name + "-check-fs-" + rand.String(6)
+	// Create PVC
+	storageSpec := m.OBCluster.Spec.OBServerTemplate.Storage.RedoLogStorage
+	requestsResources := corev1.ResourceList{}
+	// Try fallocate to check whether the filesystem meets the requirement.
+	// The probe PVC requests the same storage class and size as the configured redo log storage.
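+	// The probe flow: create a throwaway PVC owned by this OBCluster, mount it
+	// at the clog path inside a short-lived helper job, then delete the PVC in
+	// the deferred cleanup once the check has finished.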
+ requestsResources["storage"] = storageSpec.Size + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimName, + Namespace: m.OBCluster.Namespace, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: m.OBCluster.APIVersion, + Kind: m.OBCluster.Kind, + Name: m.OBCluster.Name, + UID: m.OBCluster.UID, + }}, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: requestsResources, + }, + StorageClassName: &storageSpec.StorageClass, + }, + } + err := m.Client.Create(m.Ctx, pvc) + if err != nil { + return errors.Wrap(err, "Create pvc for checking storage") + } + defer func() { + err = m.Client.Delete(m.Ctx, pvc) + if err != nil { + m.Logger.Info("Failed to delete pvc for checking storage") + } + }() + // Assemble volumeConfigs + volumeConfigs := resourceutils.JobContainerVolumes{ + Volumes: []corev1.Volume{{ + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + }, + }, + }}, + VolumeMounts: []corev1.VolumeMount{{ + Name: volumeName, + MountPath: oceanbaseconst.ClogPath, + }}, + } + _, exitCode, err := resourceutils.RunJob( + m.Ctx, m.Client, m.Logger, m.OBCluster.Namespace, + jobName, + m.OBCluster.Spec.OBServerTemplate.Image, + "/home/admin/oceanbase/bin/oceanbase-helper env-check storage "+oceanbaseconst.ClogPath, + volumeConfigs, + ) // exit code 1 means the image version does not support the env-check command, just ignore it and try if err != nil && exitCode != 1 { return errors.Wrap(err, "Check filesystem") diff --git a/internal/resource/obcluster/utils.go b/internal/resource/obcluster/utils.go index 9af1dd3c1..1a437fb9e 100644 --- a/internal/resource/obcluster/utils.go +++ b/internal/resource/obcluster/utils.go @@ -25,6 +25,7 @@ import ( apitypes "github.com/oceanbase/ob-operator/api/types" "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" zonestatus "github.com/oceanbase/ob-operator/internal/const/status/obzone" resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils" @@ -390,7 +391,7 @@ func (m *OBClusterManager) WaitOBZoneUpgradeFinished(zoneName string) error { } return false, nil } - err := resourceutils.CheckJobWithTimeout(check, time.Second*oceanbaseconst.WaitForJobTimeoutSeconds) + err := resourceutils.CheckJobWithTimeout(check, time.Second*time.Duration(obcfg.GetConfig().Time.WaitForJobTimeoutSeconds)) if err != nil { return errors.Wrap(err, "Timeout to wait obzone upgrade finished") } diff --git a/internal/resource/observer/observer_task.go b/internal/resource/observer/observer_task.go index a6542b85a..8f4ffb174 100644 --- a/internal/resource/observer/observer_task.go +++ b/internal/resource/observer/observer_task.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" apitypes "github.com/oceanbase/ob-operator/api/types" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" podconst "github.com/oceanbase/ob-operator/internal/const/pod" clusterstatus "github.com/oceanbase/ob-operator/internal/const/status/obcluster" @@ -82,7 +83,7 @@ func AddServer(m *OBServerManager) tasktypes.TaskError { } func WaitOBClusterBootstrapped(m *OBServerManager) tasktypes.TaskError { - for i := 0; 
i < oceanbaseconst.BootstrapTimeoutSeconds; i++ { + for i := 0; i < obcfg.GetConfig().Time.BootstrapTimeoutSeconds; i++ { obcluster, err := m.getOBCluster() if err != nil { return errors.Wrap(err, "Get obcluster from K8s") @@ -285,7 +286,7 @@ func UpgradeOBServerImage(m *OBServerManager) tasktypes.TaskError { func WaitOBServerPodReady(m *OBServerManager) tasktypes.TaskError { observerPodRestarted := false - for i := 0; i < oceanbaseconst.DefaultStateWaitTimeout; i++ { + for i := 0; i < obcfg.GetConfig().Time.DefaultStateWaitTimeout; i++ { observerPod, err := m.getPod() if err != nil { return errors.Wrapf(err, "Failed to get pod of observer %s", m.OBServer.Name) @@ -320,7 +321,7 @@ func WaitOBServerActiveInCluster(m *OBServerManager) tasktypes.TaskError { Port: oceanbaseconst.RpcPort, } active := false - for i := 0; i < oceanbaseconst.DefaultStateWaitTimeout; i++ { + for i := 0; i < obcfg.GetConfig().Time.DefaultStateWaitTimeout; i++ { operationManager, err := m.getOceanbaseOperationManager() if err != nil { return errors.Wrapf(err, "Get oceanbase operation manager failed") @@ -355,7 +356,7 @@ func WaitOBServerDeletedInCluster(m *OBServerManager) tasktypes.TaskError { Port: oceanbaseconst.RpcPort, } deleted := false - for i := 0; i < oceanbaseconst.ServerDeleteTimeoutSeconds; i++ { + for i := 0; i < obcfg.GetConfig().Time.ServerDeleteTimeoutSeconds; i++ { operationManager, err := m.getOceanbaseOperationManager() if err != nil { return errors.Wrapf(err, "Get oceanbase operation manager failed") @@ -395,7 +396,7 @@ func DeletePod(m *OBServerManager) tasktypes.TaskError { func WaitForPodDeleted(m *OBServerManager) tasktypes.TaskError { m.Logger.Info("Wait for observer pod being deleted") - for i := 0; i < oceanbaseconst.DefaultStateWaitTimeout; i++ { + for i := 0; i < obcfg.GetConfig().Time.DefaultStateWaitTimeout; i++ { time.Sleep(time.Second) err := m.Client.Get(m.Ctx, m.generateNamespacedName(m.OBServer.Name), &corev1.Pod{}) if err != nil && kubeerrors.IsNotFound(err) { @@ -448,7 +449,7 @@ func ExpandPVC(m *OBServerManager) tasktypes.TaskError { func WaitForPVCResized(m *OBServerManager) tasktypes.TaskError { outer: - for i := 0; i < oceanbaseconst.DefaultStateWaitTimeout; i++ { + for i := 0; i < obcfg.GetConfig().Time.DefaultStateWaitTimeout; i++ { time.Sleep(time.Second) observerPVC, err := m.getPVCs() diff --git a/internal/resource/observer/utils.go b/internal/resource/observer/utils.go index 15f741141..9b4fe0c8e 100644 --- a/internal/resource/observer/utils.go +++ b/internal/resource/observer/utils.go @@ -27,6 +27,7 @@ import ( apitypes "github.com/oceanbase/ob-operator/api/types" v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" obagentconst "github.com/oceanbase/ob-operator/internal/const/obagent" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" secretconst "github.com/oceanbase/ob-operator/internal/const/secret" @@ -413,8 +414,8 @@ func (m *OBServerManager) createOBServerContainer(obcluster *v1alpha1.OBCluster) readinessProbeTCP.Port = intstr.FromInt(oceanbaseconst.SqlPort) readinessProbe := corev1.Probe{} readinessProbe.ProbeHandler.TCPSocket = &readinessProbeTCP - readinessProbe.PeriodSeconds = oceanbaseconst.ProbeCheckPeriodSeconds - readinessProbe.InitialDelaySeconds = oceanbaseconst.ProbeCheckDelaySeconds + readinessProbe.PeriodSeconds = int32(obcfg.GetConfig().Time.ProbeCheckPeriodSeconds) + readinessProbe.InitialDelaySeconds = 
int32(obcfg.GetConfig().Time.ProbeCheckDelaySeconds) readinessProbe.FailureThreshold = 32 startOBServerCmd := "/home/admin/oceanbase/bin/oceanbase-helper start" @@ -445,7 +446,7 @@ func (m *OBServerManager) createOBServerContainer(obcluster *v1alpha1.OBCluster) } envDataFile := corev1.EnvVar{ Name: "DATAFILE_SIZE", - Value: fmt.Sprintf("%dG", datafileSize*oceanbaseconst.InitialDataDiskUsePercent/oceanbaseconst.GigaConverter/100), + Value: fmt.Sprintf("%dG", datafileSize*int64(obcfg.GetConfig().Resource.InitialDataDiskUsePercent)/oceanbaseconst.GigaConverter/100), } clogDiskSize, ok := m.OBServer.Spec.OBServerTemplate.Storage.RedoLogStorage.Size.AsInt64() if !ok { @@ -453,7 +454,7 @@ func (m *OBServerManager) createOBServerContainer(obcluster *v1alpha1.OBCluster) } envLogDisk := corev1.EnvVar{ Name: "LOG_DISK_SIZE", - Value: fmt.Sprintf("%dG", clogDiskSize*oceanbaseconst.DefaultDiskUsePercent/oceanbaseconst.GigaConverter/100), + Value: fmt.Sprintf("%dG", clogDiskSize*int64(obcfg.GetConfig().Resource.DefaultDiskUsePercent)/oceanbaseconst.GigaConverter/100), } envClusterName := corev1.EnvVar{ Name: "CLUSTER_NAME", diff --git a/internal/resource/obtenant/obtenant_task.go b/internal/resource/obtenant/obtenant_task.go index e060c1ae6..8bcbe6351 100644 --- a/internal/resource/obtenant/obtenant_task.go +++ b/internal/resource/obtenant/obtenant_task.go @@ -27,6 +27,7 @@ import ( "github.com/oceanbase/ob-operator/api/constants" "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils" "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/const/status/tenant" @@ -246,39 +247,10 @@ func CheckPrimaryTenantLSIntegrity(m *OBTenantManager) tasktypes.TaskError { if m.OBTenant.Spec.Source == nil || m.OBTenant.Spec.Source.Tenant == nil { return errors.New("Primary tenant must have source tenant") } - tenantCR := &v1alpha1.OBTenant{} - err = m.Client.Get(m.Ctx, types.NamespacedName{ - Namespace: m.OBTenant.Namespace, - Name: *m.OBTenant.Spec.Source.Tenant, - }, tenantCR) + err = resourceutils.CheckTenantLSIntegrity(m.Ctx, m.Client, m.Logger, m.OBTenant.Namespace, *m.OBTenant.Spec.Source.Tenant) if err != nil { - return err - } - - con, err := m.getClusterSysClient() - if err != nil { - return err - } - lsDeletion, err := con.ListLSDeletion(int64(tenantCR.Status.TenantRecordInfo.TenantID)) - if err != nil { - return err + return errors.Wrap(err, "Check primary tenant LS integrity") } - if len(lsDeletion) > 0 { - return errors.New("LS deletion set is not empty, log is of not integrity") - } - logStats, err := con.ListLogStats(int64(tenantCR.Status.TenantRecordInfo.TenantID)) - if err != nil { - return err - } - if len(logStats) == 0 { - return errors.New("Log stats is empty, out of expectation") - } - for _, ls := range logStats { - if ls.BeginLSN != 0 { - return errors.New("Log stats begin SCN is not 0, log is of not integrity") - } - } - return nil } @@ -352,7 +324,7 @@ func WatchRestoreJobToFinish(m *OBTenantManager) tasktypes.TaskError { return false, nil } // Tenant restoring is in common quite a slow process, so we need to wait for a longer time - err = resourceutils.CheckJobWithTimeout(check, time.Second*oceanbaseconst.LocalityChangeTimeoutSeconds) + err = resourceutils.CheckJobWithTimeout(check, time.Second*time.Duration(obcfg.GetConfig().Time.LocalityChangeTimeoutSeconds)) if err != nil { return 
errors.Wrap(err, "Failed to wait for restore job to finish") } @@ -420,7 +392,7 @@ func UpgradeTenantIfNeeded(m *OBTenantManager) tasktypes.TaskError { if err != nil { return err } - maxWait5secTimes := oceanbaseconst.DefaultStateWaitTimeout/5 + 1 + maxWait5secTimes := obcfg.GetConfig().Time.DefaultStateWaitTimeout/5 + 1 outer: for i := 0; i < maxWait5secTimes; i++ { time.Sleep(5 * time.Second) diff --git a/internal/resource/obtenantoperation/obtenantoperation_task.go b/internal/resource/obtenantoperation/obtenantoperation_task.go index 561bfc17c..dadbd9f0a 100644 --- a/internal/resource/obtenantoperation/obtenantoperation_task.go +++ b/internal/resource/obtenantoperation/obtenantoperation_task.go @@ -22,6 +22,7 @@ import ( "github.com/oceanbase/ob-operator/api/constants" "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" obtenantresource "github.com/oceanbase/ob-operator/internal/resource/obtenant" resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils" @@ -93,7 +94,7 @@ func CreateUsersForActivatedStandby(m *ObTenantOperationManager) tasktypes.TaskE } // Wait for the tenant to be ready - maxRetry := oceanbaseconst.TenantOpRetryTimes + maxRetry := obcfg.GetConfig().Time.TenantOpRetryTimes counter := 0 for counter < maxRetry { tenants, err := con.ListTenantWithName(m.Resource.Status.PrimaryTenant.Spec.TenantName) @@ -107,7 +108,7 @@ func CreateUsersForActivatedStandby(m *ObTenantOperationManager) tasktypes.TaskE if t.TenantType == "USER" && t.TenantRole == "PRIMARY" && t.SwitchoverStatus == "NORMAL" { break } - time.Sleep(oceanbaseconst.TenantOpRetryGapSeconds * time.Second) + time.Sleep(time.Duration(obcfg.GetConfig().Time.TenantOpRetryGapSeconds) * time.Second) counter++ } if counter >= maxRetry { @@ -145,7 +146,7 @@ func SwitchTenantsRole(m *ObTenantOperationManager) tasktypes.TaskError { if err != nil { return err } - maxRetry := oceanbaseconst.TenantOpRetryTimes + maxRetry := obcfg.GetConfig().Time.TenantOpRetryTimes counter := 0 for counter < maxRetry { primary, err := con.ListTenantWithName(m.Resource.Status.PrimaryTenant.Spec.TenantName) @@ -157,7 +158,7 @@ func SwitchTenantsRole(m *ObTenantOperationManager) tasktypes.TaskError { } p := primary[0] if p.TenantRole != "STANDBY" || p.SwitchoverStatus != "NORMAL" { - time.Sleep(oceanbaseconst.TenantOpRetryGapSeconds * time.Second) + time.Sleep(time.Second * time.Duration(obcfg.GetConfig().Time.TenantOpRetryGapSeconds)) counter++ } else { break @@ -185,7 +186,7 @@ func SwitchTenantsRole(m *ObTenantOperationManager) tasktypes.TaskError { } s := standby[0] if s.TenantRole != "PRIMARY" || s.SwitchoverStatus != "NORMAL" { - time.Sleep(oceanbaseconst.TenantOpRetryGapSeconds * time.Second) + time.Sleep(time.Second * time.Duration(obcfg.GetConfig().Time.TenantOpRetryGapSeconds)) counter++ } else { break @@ -290,7 +291,7 @@ func UpgradeTenant(m *ObTenantOperationManager) tasktypes.TaskError { if err != nil { return err } - maxWait5secTimes := oceanbaseconst.DefaultStateWaitTimeout/5 + 1 + maxWait5secTimes := obcfg.GetConfig().Time.DefaultStateWaitTimeout/5 + 1 outer: for i := 0; i < maxWait5secTimes; i++ { time.Sleep(5 * time.Second) diff --git a/internal/resource/obzone/obzone_task.go b/internal/resource/obzone/obzone_task.go index 5834a5d6e..d3f709fbe 100644 --- a/internal/resource/obzone/obzone_task.go +++ b/internal/resource/obzone/obzone_task.go @@ -21,6 +21,7 @@ import ( 
"k8s.io/client-go/util/retry" v1alpha1 "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" serverstatus "github.com/oceanbase/ob-operator/internal/const/status/observer" resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils" @@ -174,7 +175,7 @@ func DeleteAllOBServer(m *OBZoneManager) tasktypes.TaskError { func WaitReplicaMatch(m *OBZoneManager) tasktypes.TaskError { matched := false - for i := 0; i < oceanbaseconst.ServerDeleteTimeoutSeconds; i++ { + for i := 0; i < obcfg.GetConfig().Time.ServerDeleteTimeoutSeconds; i++ { obzone, err := m.getOBZone() if err != nil { m.Logger.Error(err, "Get obzone from K8s failed") @@ -196,7 +197,7 @@ func WaitReplicaMatch(m *OBZoneManager) tasktypes.TaskError { func WaitOBServerDeleted(m *OBZoneManager) tasktypes.TaskError { matched := false - for i := 0; i < oceanbaseconst.ServerDeleteTimeoutSeconds; i++ { + for i := 0; i < obcfg.GetConfig().Time.ServerDeleteTimeoutSeconds; i++ { obzone, err := m.getOBZone() if err != nil { m.Logger.Error(err, "Get obzone from K8s failed") @@ -265,7 +266,7 @@ func UpgradeOBServer(m *OBZoneManager) tasktypes.TaskError { } func WaitOBServerUpgraded(m *OBZoneManager) tasktypes.TaskError { - for i := 0; i < oceanbaseconst.TimeConsumingStateWaitTimeout; i++ { + for i := 0; i < obcfg.GetConfig().Time.TimeConsumingStateWaitTimeout; i++ { observerList, err := m.listOBServers() if err != nil { m.Logger.Error(err, "List observers failed") @@ -283,7 +284,7 @@ func WaitOBServerUpgraded(m *OBZoneManager) tasktypes.TaskError { m.Logger.Info("All server upgraded") return nil } - time.Sleep(oceanbaseconst.CommonCheckInterval * time.Second) + time.Sleep(time.Duration(obcfg.GetConfig().Time.CommonCheckInterval) * time.Second) } return errors.New("Wait all server upgraded timeout") } @@ -404,21 +405,21 @@ func DeleteLegacyOBServers(m *OBZoneManager) tasktypes.TaskError { } func WaitOBServerBootstrapReady(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.BootstrapReady, oceanbaseconst.DefaultStateWaitTimeout)() + return m.generateWaitOBServerStatusFunc(serverstatus.BootstrapReady, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func WaitOBServerRunning(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.Running, oceanbaseconst.DefaultStateWaitTimeout)() + return m.generateWaitOBServerStatusFunc(serverstatus.Running, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func WaitForOBServerScalingUp(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.ScaleUp, oceanbaseconst.DefaultStateWaitTimeout)() + return m.generateWaitOBServerStatusFunc(serverstatus.ScaleUp, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func WaitForOBServerExpandingPVC(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.ExpandPVC, oceanbaseconst.DefaultStateWaitTimeout)() + return m.generateWaitOBServerStatusFunc(serverstatus.ExpandPVC, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } func WaitForOBServerMounting(m *OBZoneManager) tasktypes.TaskError { - return m.generateWaitOBServerStatusFunc(serverstatus.MountBackupVolume, oceanbaseconst.DefaultStateWaitTimeout)() + return m.generateWaitOBServerStatusFunc(serverstatus.MountBackupVolume, obcfg.GetConfig().Time.DefaultStateWaitTimeout)() } diff --git 
a/internal/resource/utils/annotations.go b/internal/resource/utils/annotations.go new file mode 100644 index 000000000..c95a0903b --- /dev/null +++ b/internal/resource/utils/annotations.go @@ -0,0 +1,37 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package utils + +import ( + corev1 "k8s.io/api/core/v1" + + oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" +) + +func GetCNIFromAnnotation(pod *corev1.Pod) string { + _, found := pod.Annotations[oceanbaseconst.AnnotationCalicoValidate] + if found { + return oceanbaseconst.CNICalico + } + return oceanbaseconst.CNIUnknown +} + +func NeedAnnotation(pod *corev1.Pod, cni string) bool { + switch cni { + case oceanbaseconst.CNICalico: + _, found := pod.Annotations[oceanbaseconst.AnnotationCalicoIpAddrs] + return !found + default: + return false + } +} diff --git a/internal/resource/utils/connections.go b/internal/resource/utils/connections.go new file mode 100644 index 000000000..feb61d3ee --- /dev/null +++ b/internal/resource/utils/connections.go @@ -0,0 +1,160 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+*/ + +package utils + +import ( + "context" + "strconv" + "strings" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/oceanbase/ob-operator/api/v1alpha1" + oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" + clusterstatus "github.com/oceanbase/ob-operator/internal/const/status/obcluster" + "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/connector" + "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/operation" +) + +func GetSysOperationClient(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster) (*operation.OceanbaseOperationManager, error) { + logger.V(oceanbaseconst.LogLevelTrace).Info("Get cluster sys client", "obCluster", obcluster) + var manager *operation.OceanbaseOperationManager + var err error + _, migrateAnnoExist := GetAnnotationField(obcluster, oceanbaseconst.AnnotationsSourceClusterAddress) + if migrateAnnoExist && obcluster.Status.Status == clusterstatus.MigrateFromExisting { + manager, err = getSysClientFromSourceCluster(c, logger, obcluster, oceanbaseconst.RootUser, oceanbaseconst.SysTenant, obcluster.Spec.UserSecrets.Root) + } else { + manager, err = getSysClient(c, logger, obcluster, oceanbaseconst.OperatorUser, oceanbaseconst.SysTenant, obcluster.Spec.UserSecrets.Operator) + } + return manager, err +} + +func GetTenantRootOperationClient(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, tenantName, credential string) (*operation.OceanbaseOperationManager, error) { + logger.V(oceanbaseconst.LogLevelTrace).Info("Get tenant root client", "obCluster", obcluster, "tenantName", tenantName, "credential", credential) + observerList := &v1alpha1.OBServerList{} + err := c.List(context.Background(), observerList, client.MatchingLabels{ + oceanbaseconst.LabelRefOBCluster: obcluster.Name, + }, client.InNamespace(obcluster.Namespace)) + if err != nil { + return nil, errors.Wrap(err, "Get observer list") + } + if len(observerList.Items) == 0 { + return nil, errors.Errorf("No observer belongs to cluster %s", obcluster.Name) + } + var password string + if credential != "" { + password, err = ReadPassword(c, obcluster.Namespace, credential) + if err != nil { + return nil, errors.Wrapf(err, "Read password to get oceanbase operation manager of cluster %s", obcluster.Name) + } + } + + var s *connector.OceanBaseDataSource + for _, observer := range observerList.Items { + address := observer.Status.GetConnectAddr() + switch obcluster.Status.Status { + case clusterstatus.New: + return nil, errors.New("Cluster is not bootstrapped") + case clusterstatus.Bootstrapped: + return nil, errors.New("Cluster is not initialized") + default: + s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, password, oceanbaseconst.DefaultDatabase) + } + // if err is nil, db connection is already checked available + rootClient, err := operation.GetOceanbaseOperationManager(s) + if err == nil && rootClient != nil { + rootClient.Logger = logger + return rootClient, nil + } + // err is not nil, try to use empty password + s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, "", oceanbaseconst.DefaultDatabase) + rootClient, err = operation.GetOceanbaseOperationManager(s) + if err == nil && rootClient != nil { + rootClient.Logger = logger + return rootClient, nil + } + } + return nil, errors.Errorf("Can not get root operation client of tenant %s in obcluster %s after checked all 
servers", tenantName, obcluster.Name) +} + +func getSysClientFromSourceCluster(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, userName, tenantName, secretName string) (*operation.OceanbaseOperationManager, error) { + sysClient, err := getSysClient(c, logger, obcluster, userName, tenantName, secretName) + if err == nil { + return sysClient, nil + } + password, err := ReadPassword(c, obcluster.Namespace, secretName) + if err != nil { + return nil, errors.Wrapf(err, "Read password to get oceanbase operation manager of cluster %s", obcluster.Name) + } + // when obcluster is under migrating, use address from annotation + migrateAnnoVal, _ := GetAnnotationField(obcluster, oceanbaseconst.AnnotationsSourceClusterAddress) + servers := strings.Split(migrateAnnoVal, ";") + for _, server := range servers { + addressParts := strings.Split(server, ":") + if len(addressParts) != 2 { + return nil, errors.New("Parse oceanbase cluster connect address failed") + } + sqlPort, err := strconv.ParseInt(addressParts[1], 10, 64) + if err != nil { + return nil, errors.New("Parse sql port of obcluster failed") + } + s := connector.NewOceanBaseDataSource(addressParts[0], sqlPort, userName, tenantName, password, oceanbaseconst.DefaultDatabase) + // if err is nil, db connection is already checked available + sysClient, err := operation.GetOceanbaseOperationManager(s) + if err == nil && sysClient != nil { + sysClient.Logger = logger + return sysClient, nil + } + logger.Error(err, "Get operation manager from existing obcluster") + } + return nil, errors.Errorf("Failed to get sys client from existing obcluster, address: %s", migrateAnnoVal) +} + +func getSysClient(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, userName, tenantName, secretName string) (*operation.OceanbaseOperationManager, error) { + observerList := &v1alpha1.OBServerList{} + err := c.List(context.Background(), observerList, client.MatchingLabels{ + oceanbaseconst.LabelRefOBCluster: obcluster.Name, + }, client.InNamespace(obcluster.Namespace)) + if err != nil { + return nil, errors.Wrap(err, "Get observer list") + } + if len(observerList.Items) == 0 { + return nil, errors.Errorf("No observer belongs to cluster %s", obcluster.Name) + } + + var s *connector.OceanBaseDataSource + password, err := ReadPassword(c, obcluster.Namespace, secretName) + if err != nil { + return nil, errors.Wrapf(err, "Read password to get oceanbase operation manager of cluster %s", obcluster.Name) + } + for _, observer := range observerList.Items { + address := observer.Status.GetConnectAddr() + switch obcluster.Status.Status { + case clusterstatus.New: + s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, "", "") + case clusterstatus.Bootstrapped: + s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, "", oceanbaseconst.DefaultDatabase) + default: + s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, userName, tenantName, password, oceanbaseconst.DefaultDatabase) + } + // if err is nil, db connection is already checked available + sysClient, err := operation.GetOceanbaseOperationManager(s) + if err == nil && sysClient != nil { + sysClient.Logger = logger + return sysClient, nil + } + } + return nil, errors.Errorf("Can not get oceanbase operation manager of obcluster %s after checked all servers", obcluster.Name) +} diff --git a/internal/resource/utils/jobs.go b/internal/resource/utils/jobs.go new file mode 100644 
index 000000000..0b1ce6414 --- /dev/null +++ b/internal/resource/utils/jobs.go @@ -0,0 +1,258 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. +*/ + +package utils + +import ( + "bytes" + "context" + "fmt" + "io" + "time" + + "github.com/go-logr/logr" + "github.com/pkg/errors" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/oceanbase/ob-operator/api/v1alpha1" + obcfg "github.com/oceanbase/ob-operator/internal/config/operator" + cmdconst "github.com/oceanbase/ob-operator/internal/const/cmd" + oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" + k8sclient "github.com/oceanbase/ob-operator/pkg/k8s/client" + "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/model" +) + +func GetJob(ctx context.Context, c client.Client, namespace string, jobName string) (*batchv1.Job, error) { + job := &batchv1.Job{} + err := c.Get(ctx, types.NamespacedName{ + Namespace: namespace, + Name: jobName, + }, job) + return job, err +} + +type JobContainerVolumes struct { + VolumeMounts []corev1.VolumeMount + Volumes []corev1.Volume +} + +func RunJob(ctx context.Context, c client.Client, logger *logr.Logger, namespace string, jobName string, image string, cmd string, volumeConfigs ...JobContainerVolumes) (output string, exitCode int32, err error) { + fullJobName := fmt.Sprintf("%s-%s", jobName, rand.String(6)) + var backoffLimit int32 + var ttl int32 = 300 + var mounts []corev1.VolumeMount + var volumes []corev1.Volume + for _, vc := range volumeConfigs { + mounts = append(mounts, vc.VolumeMounts...) + volumes = append(volumes, vc.Volumes...) 
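+		// Every JobContainerVolumes entry is flattened into the single job
+		// container's mount list and the pod's volume list; callers that need
+		// no extra storage simply pass no volumeConfigs.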
+	}
+
+	container := corev1.Container{
+		Name:         "job-runner",
+		Image:        image,
+		Command:      []string{"bash", "-c", cmd},
+		VolumeMounts: mounts,
+	}
+	job := batchv1.Job{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fullJobName,
+			Namespace: namespace,
+		},
+		Spec: batchv1.JobSpec{
+			Template: corev1.PodTemplateSpec{
+				Spec: corev1.PodSpec{
+					Containers:    []corev1.Container{container},
+					RestartPolicy: corev1.RestartPolicyNever,
+					Volumes:       volumes,
+				},
+			},
+			BackoffLimit:            &backoffLimit,
+			TTLSecondsAfterFinished: &ttl,
+		},
+	}
+
+	err = c.Create(ctx, &job)
+	if err != nil {
+		return "", int32(cmdconst.ExitCodeNotExecuted), errors.Wrapf(err, "failed to create job of image: %s", image)
+	}
+
+	var jobObject *batchv1.Job
+	for i := 0; i < obcfg.GetConfig().Time.CheckJobMaxRetries; i++ {
+		jobObject, err = GetJob(ctx, c, namespace, fullJobName)
+		if err != nil {
+			logger.Error(err, "Failed to get job")
+		} else if jobObject.Status.Succeeded == 0 && jobObject.Status.Failed == 0 {
+			logger.V(oceanbaseconst.LogLevelDebug).Info("Job is still running")
+		} else {
+			logger.V(oceanbaseconst.LogLevelDebug).Info("Job finished")
+			break
+		}
+		time.Sleep(time.Second * time.Duration(obcfg.GetConfig().Time.CheckJobInterval))
+	}
+	// Guard against the case that every GetJob call above failed.
+	if jobObject == nil {
+		return "", int32(cmdconst.ExitCodeNotExecuted), errors.Wrapf(err, "Failed to get status of job %s", fullJobName)
+	}
+	clientSet := k8sclient.GetClient()
+	podList, err := clientSet.ClientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("job-name=%s", fullJobName),
+	})
+	if err != nil || len(podList.Items) == 0 {
+		return "", int32(cmdconst.ExitCodeNotExecuted), errors.Wrapf(err, "failed to get pods of job %s", jobName)
+	}
+	var outputBuffer bytes.Buffer
+	podLogOpts := corev1.PodLogOptions{}
+	pod := podList.Items[0]
+	for _, cs := range pod.Status.ContainerStatuses {
+		if cs.Name == "job-runner" && cs.State.Terminated != nil {
+			exitCode = cs.State.Terminated.ExitCode
+		}
+	}
+	if jobObject.Status.Succeeded == 1 {
+		logger.V(oceanbaseconst.LogLevelDebug).Info("Job succeeded", "job", fullJobName)
+		res := clientSet.ClientSet.CoreV1().Pods(namespace).GetLogs(pod.Name, &podLogOpts)
+		logs, err := res.Stream(ctx)
+		if err != nil {
+			logger.Error(err, "Failed to get job logs")
+		} else {
+			defer logs.Close()
+			_, err = io.Copy(&outputBuffer, logs)
+			if err != nil {
+				logger.Error(err, "Failed to copy logs")
+			}
+			output = outputBuffer.String()
+		}
+	} else {
+		logger.V(oceanbaseconst.LogLevelDebug).Info("Job failed", "job", fullJobName)
+		// err may be nil on this path, so construct a fresh error instead of wrapping.
+		return "", exitCode, errors.Errorf("Failed to run job %s", fullJobName)
+	}
+	return output, exitCode, nil
+}
+
+func ExecuteUpgradeScript(ctx context.Context, c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, filepath string, extraOpt string) error {
+	password, err := ReadPassword(c, obcluster.Namespace, obcluster.Spec.UserSecrets.Root)
+	if err != nil {
+		return errors.Wrapf(err, "Failed to get root password")
+	}
+	oceanbaseOperationManager, err := GetSysOperationClient(c, logger, obcluster)
+	if err != nil {
+		return errors.Wrapf(err, "Get operation manager failed for obcluster %s", obcluster.Name)
+	}
+	observers, err := oceanbaseOperationManager.ListServers()
+	if err != nil {
+		return errors.Wrapf(err, "Failed to list all servers for obcluster %s", obcluster.Name)
+	}
+	var rootserver model.OBServer
+	for _, observer := range observers {
+		rootserver = observer
+		if observer.WithRootserver > 0 {
+			logger.Info(fmt.Sprintf("Found rootserver, %s:%d", observer.Ip, observer.Port))
+			break
+		}
+	}
+
"script-runner", rand.String(6)) + var backoffLimit int32 + var ttl int32 = 300 + container := corev1.Container{ + Name: "script-runner", + Image: obcluster.Spec.OBServerTemplate.Image, + Command: []string{"bash", "-c", fmt.Sprintf("python2 %s -h%s -P%d -uroot -p'%s' %s", filepath, rootserver.Ip, rootserver.SqlPort, password, extraOpt)}, + } + job := batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: jobName, + Namespace: obcluster.Namespace, + OwnerReferences: []metav1.OwnerReference{{ + Kind: obcluster.Kind, + APIVersion: obcluster.APIVersion, + Name: obcluster.Name, + UID: obcluster.UID, + }}, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{container}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + BackoffLimit: &backoffLimit, + TTLSecondsAfterFinished: &ttl, + }, + } + logger.Info("Create run upgrade script job", "script", filepath) + err = c.Create(ctx, &job) + if err != nil { + return errors.Wrapf(err, "Failed to create run upgrade script job for obcluster %s", obcluster.Name) + } + + var jobObject *batchv1.Job + check := func() (bool, error) { + jobObject, err = GetJob(ctx, c, obcluster.Namespace, jobName) + if err != nil { + return false, errors.Wrapf(err, "Failed to get run upgrade script job for obcluster %s", obcluster.Name) + } + if jobObject.Status.Succeeded == 0 && jobObject.Status.Failed == 0 { + logger.V(oceanbaseconst.LogLevelDebug).Info("Job is still running") + return false, nil + } else if jobObject.Status.Succeeded == 1 { + logger.V(oceanbaseconst.LogLevelDebug).Info("Job succeeded") + return true, nil + } else { + logger.V(oceanbaseconst.LogLevelDebug).Info("Job failed", "job", jobName) + return false, errors.Wrap(err, "Failed to run upgrade script job") + } + } + err = CheckJobWithTimeout(check, time.Second*time.Duration(obcfg.GetConfig().Time.WaitForJobTimeoutSeconds)) + if err != nil { + return errors.Wrap(err, "Failed to wait for job to finish") + } + return nil +} + +type CheckJobFunc func() (bool, error) + +// CheckJobWithTimeout checks job with timeout, return error if timeout or job failed. +// First parameter is the function to check job status, return true if job finished, false if not. +// Second parameter is the timeout duration, default to 1800s. +// Third parameter is the interval to check job status, default to 3s. +func CheckJobWithTimeout(f CheckJobFunc, ds ...time.Duration) error { + timeout := time.Second * time.Duration(obcfg.GetConfig().Time.DefaultStateWaitTimeout) + interval := time.Second * time.Duration(obcfg.GetConfig().Time.CheckJobInterval) + if len(ds) > 0 { + timeout = ds[0] + } + if len(ds) > 1 { + interval = ds[1] + } + timer := time.NewTimer(timeout) + defer timer.Stop() + for { + select { + case <-timer.C: + return errors.New("Timeout to wait for job") + default: + time.Sleep(interval) + finished, err := f() + if err != nil { + return err + } + if finished { + return nil + } + } + } +} diff --git a/internal/resource/utils/secret.go b/internal/resource/utils/secret.go new file mode 100644 index 000000000..e8daf500a --- /dev/null +++ b/internal/resource/utils/secret.go @@ -0,0 +1,36 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. 
+You may obtain a copy of Mulan PSL v2 at:
+ http://license.coscl.org.cn/MulanPSL2
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+*/
+
+package utils
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	secretconst "github.com/oceanbase/ob-operator/internal/const/secret"
+)
+
+func ReadPassword(c client.Client, namespace, secretName string) (string, error) {
+	secret := &corev1.Secret{}
+	err := c.Get(context.Background(), types.NamespacedName{
+		Namespace: namespace,
+		Name:      secretName,
+	}, secret)
+	if err != nil {
+		return "", errors.Wrapf(err, "Get password from secret %s failed", secretName)
+	}
+	return string(secret.Data[secretconst.PasswordKeyName]), nil
+}
diff --git a/internal/resource/utils/tenant.go b/internal/resource/utils/tenant.go
new file mode 100644
index 000000000..88ffa0d38
--- /dev/null
+++ b/internal/resource/utils/tenant.go
@@ -0,0 +1,131 @@
+/*
+Copyright (c) 2023 OceanBase
+ob-operator is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+ http://license.coscl.org.cn/MulanPSL2
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+*/
+
+package utils
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/go-logr/logr"
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/oceanbase/ob-operator/api/v1alpha1"
+	oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase"
+	"github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/operation"
+)
+
+// GetTenantRestoreSource gets the restore source from a tenant CR. If tenantCR is in the form ns/name, the parameter ns is ignored.
+func GetTenantRestoreSource(ctx context.Context, clt client.Client, logger *logr.Logger, ns, tenantCR string) (string, error) {
+	tenant, err := getOBTenantInK8s(ctx, clt, ns, tenantCR)
+	if err != nil {
+		return "", err
+	}
+	con, err := getClusterSysConOfOBTenant(ctx, clt, logger, tenant)
+	if err != nil {
+		return "", err
+	}
+	// Get ip_list of target tenant
+	aps, err := con.ListTenantAccessPoints(tenant.Spec.TenantName)
+	if err != nil {
+		return "", err
+	}
+	ipList := make([]string, 0)
+	for _, ap := range aps {
+		ipList = append(ipList, fmt.Sprintf("%s:%d", ap.SvrIP, ap.SqlPort))
+	}
+	// Read the standby-RO secret from the tenant's own namespace, which may differ from ns.
+	standbyRoPwd, err := ReadPassword(clt, tenant.Namespace, tenant.Status.Credentials.StandbyRO)
+	if err != nil {
+		logger.Error(err, "Failed to read standby ro password")
+		return "", err
+	}
+	// Set restore source
+	restoreSource := fmt.Sprintf("SERVICE=%s USER=%s@%s PASSWORD=%s", strings.Join(ipList, ";"), oceanbaseconst.StandbyROUser, tenant.Spec.TenantName, standbyRoPwd)
+
+	return restoreSource, nil
+}
+
+// CheckTenantLSIntegrity checks the LS integrity of a tenant CR. If tenantCR is in the form ns/name, the parameter ns is ignored.
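+// A tenant passes the check when its LS deletion set is empty and every log
+// stream's logs begin at LSN 0, i.e. the complete log history is still available.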
+func CheckTenantLSIntegrity(ctx context.Context, clt client.Client, logger *logr.Logger, ns, tenantCR string) error {
+	tenant, err := getOBTenantInK8s(ctx, clt, ns, tenantCR)
+	if err != nil {
+		return err
+	}
+	con, err := getClusterSysConOfOBTenant(ctx, clt, logger, tenant)
+	if err != nil {
+		return err
+	}
+	// Check LS integrity
+	lsDeletion, err := con.ListLSDeletion(int64(tenant.Status.TenantRecordInfo.TenantID))
+	if err != nil {
+		return err
+	}
+	if len(lsDeletion) > 0 {
+		return errors.New("LS deletion set is not empty, the log is not intact")
+	}
+	logStats, err := con.ListLogStats(int64(tenant.Status.TenantRecordInfo.TenantID))
+	if err != nil {
+		return err
+	}
+	if len(logStats) == 0 {
+		return errors.New("Log stats is empty, which is unexpected")
+	}
+	for _, ls := range logStats {
+		if ls.BeginLSN != 0 {
+			return errors.New("Log stats begin LSN is not 0, the log is not intact")
+		}
+	}
+
+	return nil
+}
+
+func getOBTenantInK8s(ctx context.Context, clt client.Client, ns, tenantCR string) (*v1alpha1.OBTenant, error) {
+	finalNs := ns
+	finalTenantCR := tenantCR
+	splits := strings.Split(tenantCR, "/")
+	if len(splits) == 2 {
+		finalNs = splits[0]
+		finalTenantCR = splits[1]
+	}
+	var err error
+	tenant := &v1alpha1.OBTenant{}
+	err = clt.Get(ctx, types.NamespacedName{
+		Namespace: finalNs,
+		Name:      finalTenantCR,
+	}, tenant)
+	if err != nil {
+		if client.IgnoreNotFound(err) != nil {
+			return nil, err
+		}
+		return nil, errors.New("tenant not found")
+	}
+	return tenant, nil
+}
+
+func getClusterSysConOfOBTenant(ctx context.Context, clt client.Client, logger *logr.Logger, tenant *v1alpha1.OBTenant) (*operation.OceanbaseOperationManager, error) {
+	obcluster := &v1alpha1.OBCluster{}
+	err := clt.Get(ctx, types.NamespacedName{
+		Namespace: tenant.Namespace,
+		Name:      tenant.Spec.ClusterName,
+	}, obcluster)
+	if err != nil {
+		return nil, errors.Wrap(err, "get obcluster")
+	}
+	con, err := GetSysOperationClient(clt, logger, obcluster)
+	if err != nil {
+		return nil, errors.Wrap(err, "get oceanbase operation manager")
+	}
+	return con, nil
+}
diff --git a/internal/resource/utils/util.go b/internal/resource/utils/util.go
deleted file mode 100644
index 38fdffdf1..000000000
--- a/internal/resource/utils/util.go
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
-Copyright (c) 2023 OceanBase
-ob-operator is licensed under Mulan PSL v2.
-You can use this software according to the terms and conditions of the Mulan PSL v2.
-You may obtain a copy of Mulan PSL v2 at:
- http://license.coscl.org.cn/MulanPSL2
-THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
-EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
-MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
-See the Mulan PSL v2 for more details.
-*/ - -package utils - -import ( - "bytes" - "context" - "fmt" - "io" - "strconv" - "strings" - "time" - - cmdconst "github.com/oceanbase/ob-operator/internal/const/cmd" - - "github.com/go-logr/logr" - "github.com/pkg/errors" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/oceanbase/ob-operator/api/v1alpha1" - oceanbaseconst "github.com/oceanbase/ob-operator/internal/const/oceanbase" - secretconst "github.com/oceanbase/ob-operator/internal/const/secret" - clusterstatus "github.com/oceanbase/ob-operator/internal/const/status/obcluster" - k8sclient "github.com/oceanbase/ob-operator/pkg/k8s/client" - "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/connector" - "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/model" - "github.com/oceanbase/ob-operator/pkg/oceanbase-sdk/operation" -) - -func ReadPassword(c client.Client, namespace, secretName string) (string, error) { - secret := &corev1.Secret{} - err := c.Get(context.Background(), types.NamespacedName{ - Namespace: namespace, - Name: secretName, - }, secret) - if err != nil { - return "", errors.Wrapf(err, "Get password from secret %s failed", secretName) - } - return string(secret.Data[secretconst.PasswordKeyName]), err -} - -func GetSysOperationClient(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster) (*operation.OceanbaseOperationManager, error) { - logger.V(oceanbaseconst.LogLevelTrace).Info("Get cluster sys client", "obCluster", obcluster) - var manager *operation.OceanbaseOperationManager - var err error - _, migrateAnnoExist := GetAnnotationField(obcluster, oceanbaseconst.AnnotationsSourceClusterAddress) - if migrateAnnoExist && obcluster.Status.Status == clusterstatus.MigrateFromExisting { - manager, err = getSysClientFromSourceCluster(c, logger, obcluster, oceanbaseconst.RootUser, oceanbaseconst.SysTenant, obcluster.Spec.UserSecrets.Root) - } else { - manager, err = getSysClient(c, logger, obcluster, oceanbaseconst.OperatorUser, oceanbaseconst.SysTenant, obcluster.Spec.UserSecrets.Operator) - } - return manager, err -} - -func GetTenantRootOperationClient(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, tenantName, credential string) (*operation.OceanbaseOperationManager, error) { - logger.V(oceanbaseconst.LogLevelTrace).Info("Get tenant root client", "obCluster", obcluster, "tenantName", tenantName, "credential", credential) - observerList := &v1alpha1.OBServerList{} - err := c.List(context.Background(), observerList, client.MatchingLabels{ - oceanbaseconst.LabelRefOBCluster: obcluster.Name, - }, client.InNamespace(obcluster.Namespace)) - if err != nil { - return nil, errors.Wrap(err, "Get observer list") - } - if len(observerList.Items) == 0 { - return nil, errors.Errorf("No observer belongs to cluster %s", obcluster.Name) - } - var password string - if credential != "" { - password, err = ReadPassword(c, obcluster.Namespace, credential) - if err != nil { - return nil, errors.Wrapf(err, "Read password to get oceanbase operation manager of cluster %s", obcluster.Name) - } - } - - var s *connector.OceanBaseDataSource - for _, observer := range observerList.Items { - address := observer.Status.GetConnectAddr() - switch obcluster.Status.Status { - case clusterstatus.New: - return nil, errors.New("Cluster is not bootstrapped") - case clusterstatus.Bootstrapped: - return nil, errors.New("Cluster is 
not initialized") - default: - s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, password, oceanbaseconst.DefaultDatabase) - } - // if err is nil, db connection is already checked available - rootClient, err := operation.GetOceanbaseOperationManager(s) - if err == nil && rootClient != nil { - rootClient.Logger = logger - return rootClient, nil - } - // err is not nil, try to use empty password - s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, "", oceanbaseconst.DefaultDatabase) - rootClient, err = operation.GetOceanbaseOperationManager(s) - if err == nil && rootClient != nil { - rootClient.Logger = logger - return rootClient, nil - } - } - return nil, errors.Errorf("Can not get root operation client of tenant %s in obcluster %s after checked all servers", tenantName, obcluster.Name) -} - -func getSysClientFromSourceCluster(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, userName, tenantName, secretName string) (*operation.OceanbaseOperationManager, error) { - sysClient, err := getSysClient(c, logger, obcluster, userName, tenantName, secretName) - if err == nil { - return sysClient, nil - } - password, err := ReadPassword(c, obcluster.Namespace, secretName) - if err != nil { - return nil, errors.Wrapf(err, "Read password to get oceanbase operation manager of cluster %s", obcluster.Name) - } - // when obcluster is under migrating, use address from annotation - migrateAnnoVal, _ := GetAnnotationField(obcluster, oceanbaseconst.AnnotationsSourceClusterAddress) - servers := strings.Split(migrateAnnoVal, ";") - for _, server := range servers { - addressParts := strings.Split(server, ":") - if len(addressParts) != 2 { - return nil, errors.New("Parse oceanbase cluster connect address failed") - } - sqlPort, err := strconv.ParseInt(addressParts[1], 10, 64) - if err != nil { - return nil, errors.New("Parse sql port of obcluster failed") - } - s := connector.NewOceanBaseDataSource(addressParts[0], sqlPort, userName, tenantName, password, oceanbaseconst.DefaultDatabase) - // if err is nil, db connection is already checked available - sysClient, err := operation.GetOceanbaseOperationManager(s) - if err == nil && sysClient != nil { - sysClient.Logger = logger - return sysClient, nil - } - logger.Error(err, "Get operation manager from existing obcluster") - } - return nil, errors.Errorf("Failed to get sys client from existing obcluster, address: %s", migrateAnnoVal) -} - -func getSysClient(c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, userName, tenantName, secretName string) (*operation.OceanbaseOperationManager, error) { - observerList := &v1alpha1.OBServerList{} - err := c.List(context.Background(), observerList, client.MatchingLabels{ - oceanbaseconst.LabelRefOBCluster: obcluster.Name, - }, client.InNamespace(obcluster.Namespace)) - if err != nil { - return nil, errors.Wrap(err, "Get observer list") - } - if len(observerList.Items) == 0 { - return nil, errors.Errorf("No observer belongs to cluster %s", obcluster.Name) - } - - var s *connector.OceanBaseDataSource - password, err := ReadPassword(c, obcluster.Namespace, secretName) - if err != nil { - return nil, errors.Wrapf(err, "Read password to get oceanbase operation manager of cluster %s", obcluster.Name) - } - for _, observer := range observerList.Items { - address := observer.Status.GetConnectAddr() - switch obcluster.Status.Status { - case clusterstatus.New: - s = 
connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, "", "") - case clusterstatus.Bootstrapped: - s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, oceanbaseconst.RootUser, tenantName, "", oceanbaseconst.DefaultDatabase) - default: - s = connector.NewOceanBaseDataSource(address, oceanbaseconst.SqlPort, userName, tenantName, password, oceanbaseconst.DefaultDatabase) - } - // if err is nil, db connection is already checked available - sysClient, err := operation.GetOceanbaseOperationManager(s) - if err == nil && sysClient != nil { - sysClient.Logger = logger - return sysClient, nil - } - } - return nil, errors.Errorf("Can not get oceanbase operation manager of obcluster %s after checked all servers", obcluster.Name) -} - -func GetJob(ctx context.Context, c client.Client, namespace string, jobName string) (*batchv1.Job, error) { - job := &batchv1.Job{} - err := c.Get(ctx, types.NamespacedName{ - Namespace: namespace, - Name: jobName, - }, job) - return job, err -} - -func RunJob(ctx context.Context, c client.Client, logger *logr.Logger, namespace string, jobName string, image string, cmd string) (output string, exitCode int32, err error) { - fullJobName := fmt.Sprintf("%s-%s", jobName, rand.String(6)) - var backoffLimit int32 - var ttl int32 = 300 - container := corev1.Container{ - Name: "job-runner", - Image: image, - Command: []string{"bash", "-c", cmd}, - } - job := batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: fullJobName, - Namespace: namespace, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{container}, - RestartPolicy: corev1.RestartPolicyNever, - }, - }, - BackoffLimit: &backoffLimit, - TTLSecondsAfterFinished: &ttl, - }, - } - - err = c.Create(ctx, &job) - if err != nil { - return "", int32(cmdconst.ExitCodeNotExecuted), errors.Wrapf(err, "failed to create job of image: %s", image) - } - - var jobObject *batchv1.Job - for i := 0; i < oceanbaseconst.CheckJobMaxRetries; i++ { - jobObject, err = GetJob(ctx, c, namespace, fullJobName) - if err != nil { - logger.Error(err, "Failed to get job") - // return errors.Wrapf(err, "Failed to get run upgrade script job for obcluster %s", obcluster.Name) - } - if jobObject.Status.Succeeded == 0 && jobObject.Status.Failed == 0 { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job is still running") - } else { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job finished") - break - } - time.Sleep(time.Second * oceanbaseconst.CheckJobInterval) - } - clientSet := k8sclient.GetClient() - podList, err := clientSet.ClientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ - LabelSelector: fmt.Sprintf("job-name=%s", fullJobName), - }) - if err != nil || len(podList.Items) == 0 { - return "", int32(cmdconst.ExitCodeNotExecuted), errors.Wrapf(err, "failed to get pods of job %s", jobName) - } - var outputBuffer bytes.Buffer - podLogOpts := corev1.PodLogOptions{} - pod := podList.Items[0] - for _, cs := range pod.Status.ContainerStatuses { - if cs.Name == "job-runner" { - exitCode = cs.State.Terminated.ExitCode - } - } - if jobObject.Status.Succeeded == 1 { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job succeeded", "job", fullJobName) - res := clientSet.ClientSet.CoreV1().Pods(namespace).GetLogs(pod.Name, &podLogOpts) - logs, err := res.Stream(ctx) - if err != nil { - logger.Error(err, "Failed to get job logs") - } else { - defer logs.Close() - _, err = io.Copy(&outputBuffer, logs) - if err != nil { - 
logger.Error(err, "Failed to copy logs") - } - output = outputBuffer.String() - } - } else { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job failed", "job", fullJobName) - return "", exitCode, errors.Wrapf(err, "Failed to run job %s", fullJobName) - } - return output, exitCode, nil -} - -func ExecuteUpgradeScript(ctx context.Context, c client.Client, logger *logr.Logger, obcluster *v1alpha1.OBCluster, filepath string, extraOpt string) error { - password, err := ReadPassword(c, obcluster.Namespace, obcluster.Spec.UserSecrets.Root) - if err != nil { - return errors.Wrapf(err, "Failed to get root password") - } - oceanbaseOperationManager, err := GetSysOperationClient(c, logger, obcluster) - if err != nil { - return errors.Wrapf(err, "Get operation manager failed for obcluster %s", obcluster.Name) - } - observers, err := oceanbaseOperationManager.ListServers() - if err != nil { - return errors.Wrapf(err, "Failed to list all servers for obcluster %s", obcluster.Name) - } - var rootserver model.OBServer - for _, observer := range observers { - rootserver = observer - if observer.WithRootserver > 0 { - logger.Info(fmt.Sprintf("Found rootserver, %s:%d", observer.Ip, observer.Port)) - break - } - } - - jobName := fmt.Sprintf("%s-%s", "script-runner", rand.String(6)) - var backoffLimit int32 - var ttl int32 = 300 - container := corev1.Container{ - Name: "script-runner", - Image: obcluster.Spec.OBServerTemplate.Image, - Command: []string{"bash", "-c", fmt.Sprintf("python2 %s -h%s -P%d -uroot -p'%s' %s", filepath, rootserver.Ip, rootserver.SqlPort, password, extraOpt)}, - } - job := batchv1.Job{ - ObjectMeta: metav1.ObjectMeta{ - Name: jobName, - Namespace: obcluster.Namespace, - OwnerReferences: []metav1.OwnerReference{{ - Kind: obcluster.Kind, - APIVersion: obcluster.APIVersion, - Name: obcluster.Name, - UID: obcluster.UID, - }}, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - Spec: corev1.PodSpec{ - Containers: []corev1.Container{container}, - RestartPolicy: corev1.RestartPolicyNever, - }, - }, - BackoffLimit: &backoffLimit, - TTLSecondsAfterFinished: &ttl, - }, - } - logger.Info("Create run upgrade script job", "script", filepath) - err = c.Create(ctx, &job) - if err != nil { - return errors.Wrapf(err, "Failed to create run upgrade script job for obcluster %s", obcluster.Name) - } - - var jobObject *batchv1.Job - check := func() (bool, error) { - jobObject, err = GetJob(ctx, c, obcluster.Namespace, jobName) - if err != nil { - return false, errors.Wrapf(err, "Failed to get run upgrade script job for obcluster %s", obcluster.Name) - } - if jobObject.Status.Succeeded == 0 && jobObject.Status.Failed == 0 { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job is still running") - return false, nil - } else if jobObject.Status.Succeeded == 1 { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job succeeded") - return true, nil - } else { - logger.V(oceanbaseconst.LogLevelDebug).Info("Job failed", "job", jobName) - return false, errors.Wrap(err, "Failed to run upgrade script job") - } - } - err = CheckJobWithTimeout(check, time.Second*oceanbaseconst.WaitForJobTimeoutSeconds) - if err != nil { - return errors.Wrap(err, "Failed to wait for job to finish") - } - return nil -} - -type CheckJobFunc func() (bool, error) - -// CheckJobWithTimeout checks job with timeout, return error if timeout or job failed. -// First parameter is the function to check job status, return true if job finished, false if not. -// Second parameter is the timeout duration, default to 1800s. 
-// Third parameter is the interval to check job status, default to 3s. -func CheckJobWithTimeout(f CheckJobFunc, ds ...time.Duration) error { - timeout := time.Second * oceanbaseconst.DefaultStateWaitTimeout - interval := time.Second * oceanbaseconst.CheckJobInterval - if len(ds) > 0 { - timeout = ds[0] - } - if len(ds) > 1 { - interval = ds[1] - } - timer := time.NewTimer(timeout) - defer timer.Stop() - for { - select { - case <-timer.C: - return errors.New("Timeout to wait for job") - default: - time.Sleep(interval) - finished, err := f() - if err != nil { - return err - } - if finished { - return nil - } - } - } -} - -func GetCNIFromAnnotation(pod *corev1.Pod) string { - _, found := pod.Annotations[oceanbaseconst.AnnotationCalicoValidate] - if found { - return oceanbaseconst.CNICalico - } - return oceanbaseconst.CNIUnknown -} - -func NeedAnnotation(pod *corev1.Pod, cni string) bool { - switch cni { - case oceanbaseconst.CNICalico: - _, found := pod.Annotations[oceanbaseconst.AnnotationCalicoIpAddrs] - return !found - default: - return false - } -} - -// GetTenantRestoreSource gets restore source from tenant CR. If tenantCR is in form of ns/name, the parameter ns is ignored. -func GetTenantRestoreSource(ctx context.Context, clt client.Client, logger *logr.Logger, ns, tenantCR string) (string, error) { - finalNs := ns - finalTenantCR := tenantCR - splits := strings.Split(tenantCR, "/") - if len(splits) == 2 { - finalNs = splits[0] - finalTenantCR = splits[1] - } - var restoreSource string - var err error - - primary := &v1alpha1.OBTenant{} - err = clt.Get(ctx, types.NamespacedName{ - Namespace: finalNs, - Name: finalTenantCR, - }, primary) - if err != nil { - if client.IgnoreNotFound(err) != nil { - return "", err - } - } else { - obcluster := &v1alpha1.OBCluster{} - err := clt.Get(ctx, types.NamespacedName{ - Namespace: finalNs, - Name: primary.Spec.ClusterName, - }, obcluster) - if err != nil { - return "", errors.Wrap(err, "get obcluster") - } - con, err := GetSysOperationClient(clt, logger, obcluster) - if err != nil { - return "", errors.Wrap(err, "get oceanbase operation manager") - } - // Get ip_list from primary tenant - aps, err := con.ListTenantAccessPoints(primary.Spec.TenantName) - if err != nil { - return "", err - } - ipList := make([]string, 0) - for _, ap := range aps { - ipList = append(ipList, fmt.Sprintf("%s:%d", ap.SvrIP, ap.SqlPort)) - } - standbyRoPwd, err := ReadPassword(clt, ns, primary.Status.Credentials.StandbyRO) - if err != nil { - logger.Error(err, "Failed to read standby ro password") - return "", err - } - // Set restore source - restoreSource = fmt.Sprintf("SERVICE=%s USER=%s@%s PASSWORD=%s", strings.Join(ipList, ";"), oceanbaseconst.StandbyROUser, primary.Spec.TenantName, standbyRoPwd) - } - - return restoreSource, nil -} diff --git a/pkg/coordinator/config.go b/pkg/coordinator/config.go new file mode 100644 index 000000000..0c46282df --- /dev/null +++ b/pkg/coordinator/config.go @@ -0,0 +1,39 @@ +/* +Copyright (c) 2023 OceanBase +ob-operator is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. 
+*/
+
+package coordinator
+
+import "time"
+
+type config struct {
+	NormalRequeueDuration    time.Duration
+	ExecutionRequeueDuration time.Duration
+
+	TaskMaxRetryTimes         int
+	TaskRetryBackoffThreshold int
+}
+
+var cfg = &config{
+	NormalRequeueDuration:    30 * time.Second,
+	ExecutionRequeueDuration: 1 * time.Second,
+
+	TaskMaxRetryTimes:         99,
+	TaskRetryBackoffThreshold: 16,
+}
+
+func SetMaxRetryTimes(maxRetryTimes int) {
+	cfg.TaskMaxRetryTimes = maxRetryTimes
+}
+
+func SetRetryBackoffThreshold(retryBackoffThreshold int) {
+	cfg.TaskRetryBackoffThreshold = retryBackoffThreshold
+}
diff --git a/pkg/coordinator/coordinator.go b/pkg/coordinator/coordinator.go
index 945a8c755..707ba5b62 100644
--- a/pkg/coordinator/coordinator.go
+++ b/pkg/coordinator/coordinator.go
@@ -13,26 +13,16 @@ See the Mulan PSL v2 for more details.
 package coordinator
 
 import (
-	"time"
-
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
 	ctrl "sigs.k8s.io/controller-runtime"
 
-	obconst "github.com/oceanbase/ob-operator/internal/const/oceanbase"
 	"github.com/oceanbase/ob-operator/pkg/task"
 	taskstatus "github.com/oceanbase/ob-operator/pkg/task/const/status"
 	"github.com/oceanbase/ob-operator/pkg/task/const/strategy"
 	tasktypes "github.com/oceanbase/ob-operator/pkg/task/types"
 )
 
-const (
-	// If no task flow, requeue after 30 sec.
-	NormalRequeueDuration = 30 * time.Second
-	// In task flow, requeue after 1 sec.
-	ExecutionRequeueDuration = 1 * time.Second
-)
-
 type Coordinator struct {
 	Manager ResourceManager
 	Logger  *logr.Logger
@@ -56,7 +46,7 @@ func NewCoordinator(m ResourceManager, logger *logr.Logger) *Coordinator {
 // will be requeued using exponential backoff.
 func (c *Coordinator) Coordinate() (ctrl.Result, error) {
 	result := ctrl.Result{
-		RequeueAfter: ExecutionRequeueDuration,
+		RequeueAfter: cfg.ExecutionRequeueDuration,
 	}
 	var f *tasktypes.TaskFlow
 	var err error
@@ -69,16 +59,16 @@ func (c *Coordinator) Coordinate() (ctrl.Result, error) {
 			return result, errors.Wrap(err, "Get task flow")
 		} else if f == nil {
 			// No need to execute task flow
-			result.RequeueAfter = NormalRequeueDuration
+			result.RequeueAfter = cfg.NormalRequeueDuration
 		} else {
-			c.Logger.V(obconst.LogLevelDebug).Info("Set operation context", "operation context", f.OperationContext)
+			c.Logger.V(1).Info("Set operation context", "operation context", f.OperationContext)
 			c.Manager.SetOperationContext(f.OperationContext)
 			// execution errors are reflected by the task status
 			c.executeTaskFlow(f)
 			// if the task status is `failed`, requeue after 2 ^ min(retryCount, threshold) * ExecutionRequeueDuration;
 			// with the default 1s unit and a threshold of 16, the maximum backoff is about 18 hours.
 			if f.OperationContext.OnFailure.RetryCount > 0 && f.OperationContext.TaskStatus == taskstatus.Failed {
-				result.RequeueAfter = ExecutionRequeueDuration * (1 << Min(f.OperationContext.OnFailure.RetryCount, obconst.TaskRetryBackoffThreshold))
+				result.RequeueAfter = cfg.ExecutionRequeueDuration * (1 << Min(f.OperationContext.OnFailure.RetryCount, cfg.TaskRetryBackoffThreshold))
 			}
 		}
 	}
@@ -88,7 +78,7 @@ func (c *Coordinator) Coordinate() (ctrl.Result, error) {
 		if err != nil {
 			return result, errors.Wrapf(err, "Check and update finalizer failed")
 		}
-		result.RequeueAfter = ExecutionRequeueDuration
+		result.RequeueAfter = cfg.ExecutionRequeueDuration
 	}
 	err = c.cleanTaskResultMap(f)
 	if err != nil {
@@ -100,9 +90,9 @@ func (c *Coordinator) Coordinate() (ctrl.Result, error) {
 	}
 	// When the status changes (e.g. from running to another status), set a shorter `requeue after` to speed up processing.
if c.Manager.GetStatus() != beforeStatus { - result.RequeueAfter = ExecutionRequeueDuration + result.RequeueAfter = cfg.ExecutionRequeueDuration } - c.Logger.V(obconst.LogLevelTrace).Info("Requeue after", "duration", result.RequeueAfter) + c.Logger.V(2).Info("Requeue after", "duration", result.RequeueAfter) return result, err } @@ -122,9 +112,9 @@ func (c *Coordinator) executeTaskFlow(f *tasktypes.TaskFlow) { c.Logger.Error(err, "No executable function found for task") c.Manager.PrintErrEvent(err) } else { - c.Logger.V(obconst.LogLevelDebug).Info("Successfully get task func " + f.OperationContext.Task.Display()) + c.Logger.V(1).Info("Successfully get task func " + f.OperationContext.Task.Display()) taskId := task.GetTaskManager().Submit(taskFunc) - c.Logger.V(obconst.LogLevelDebug).Info("Successfully submit task", "taskId", taskId) + c.Logger.V(1).Info("Successfully submit task", "taskId", taskId) f.OperationContext.TaskId = taskId f.OperationContext.TaskStatus = taskstatus.Running } @@ -137,7 +127,7 @@ func (c *Coordinator) executeTaskFlow(f *tasktypes.TaskFlow) { c.Manager.PrintErrEvent(err) f.OperationContext.TaskStatus = taskstatus.Failed } else if taskResult != nil { - c.Logger.V(obconst.LogLevelDebug).Info("Task finished", "task id", f.OperationContext.TaskId, "task result", taskResult) + c.Logger.V(1).Info("Task finished", "task id", f.OperationContext.TaskId, "task result", taskResult) f.OperationContext.TaskStatus = taskResult.Status if taskResult.Error != nil { c.Manager.PrintErrEvent(taskResult.Error) @@ -155,7 +145,7 @@ func (c *Coordinator) executeTaskFlow(f *tasktypes.TaskFlow) { switch f.OperationContext.OnFailure.Strategy { case strategy.RetryFromCurrent, strategy.StartOver: // if strategy is retry or start over, limit the maximum retry times - maxRetry := obconst.TaskMaxRetryTimes + maxRetry := cfg.TaskMaxRetryTimes if f.OperationContext.OnFailure.MaxRetry != 0 { maxRetry = f.OperationContext.OnFailure.MaxRetry } diff --git a/ui/src/components/Terminal/terminal.tsx b/ui/src/components/Terminal/terminal.tsx index 70965edd4..c2ab5e3f3 100644 --- a/ui/src/components/Terminal/terminal.tsx +++ b/ui/src/components/Terminal/terminal.tsx @@ -1,96 +1,116 @@ -import { Terminal } from '@xterm/xterm' -import { Button, Modal } from 'antd' -import React from 'react' +import { intl } from '@/utils/intl'; +import { Terminal } from '@xterm/xterm'; +import { Button, Modal } from 'antd'; +import React from 'react'; export interface ITerminal { - terminalId: string - onClose?: () => void - onConnected?: () => void + terminalId: string; + onClose?: () => void; + onConnected?: () => void; } function devLog(...args: any[]) { if (process.env.NODE_ENV === 'development') { - console.log(...args) + console.log(...args); } } export const OBTerminal: React.FC = (props) => { - const { terminalId } = props - const ref = React.useRef(null) - const [ws, setWs] = React.useState(null) + const { terminalId } = props; + const ref = React.useRef(null); + const [ws, setWs] = React.useState(null); React.useEffect(() => { if (ref.current) { const term = new Terminal({ cols: 160, rows: 60, - }) - term.open(ref.current) + }); + term.open(ref.current); if (term.options.fontSize) { - const containerWidth = ref.current.clientWidth + const containerWidth = ref.current.clientWidth; - const cols = Math.floor(containerWidth / 9.2) - const rows = Math.floor(cols / 4) - term.resize(cols, rows) - const ws = new WebSocket(`ws://${location.host}/api/v1/terminal/${terminalId}?cols=${cols}&rows=${rows}`) - term.write('Hello from 
\x1B[1;3;31mOceanBase\x1B[0m\r\n') + const cols = Math.floor(containerWidth / 9.2); + const rows = Math.floor(cols / 4); + term.resize(cols, rows); + const ws = new WebSocket( + `ws://${location.host}/api/v1/terminal/${terminalId}?cols=${cols}&rows=${rows}`, + ); + term.write('Hello from \x1B[1;3;31mOceanBase\x1B[0m\r\n'); ws.onopen = function () { - devLog('Websocket connection open ...') + devLog('Websocket connection open ...'); // ws.send(JSON.stringify({ type: 'ping' })) term.onData(function (data) { - ws.send(data) - }) - props.onConnected?.() - setWs(ws) + ws.send(data); + }); + props.onConnected?.(); + setWs(ws); window.addEventListener('beforeunload', () => { if (ws) { - ws.close() - props.onClose?.() - setWs(null) + ws.close(); + props.onClose?.(); + setWs(null); } - }) - } + }); + }; ws.onmessage = function (event) { - term.write(event.data) - } + term.write(event.data); + }; ws.onclose = function () { - devLog('Connection closed.') - term.write('\r\nConnection closed.\r\n') - } + devLog('Connection closed.'); + term.write('\r\nConnection closed.\r\n'); + }; ws.onerror = function (evt) { - console.error('WebSocket error observed:', evt) - } + console.error('WebSocket error observed:', evt); + }; } } return () => { - window.removeEventListener('beforeunload', () => { }) - } - }, []) + window.removeEventListener('beforeunload', () => {}); + }; + }, []); return ( <> - {ws &&
- -
} + {ws && ( +
+ +
+ )}
- ) -} + ); +}; diff --git a/ui/src/i18n/strings/en-US.json b/ui/src/i18n/strings/en-US.json index a8ec8e7cc..d840869b0 100644 --- a/ui/src/i18n/strings/en-US.json +++ b/ui/src/i18n/strings/en-US.json @@ -695,5 +695,15 @@ "Dashboard.Detail.Overview.BasicInfo.ClusterInformation": "Cluster Information", "Dashboard.Detail.Overview.ServerTable.ServerList": "Server List", "Dashboard.Detail.Overview.ZoneTable.ZoneList": "Zone List", - "Dashboard.Detail.NewBackup.AdvancedConfiguration.Days": "Days" + "Dashboard.Detail.NewBackup.AdvancedConfiguration.Days": "Days", + "Dashboard.Cluster.Detail.Connection1": "Connection", + "Dashboard.Cluster.Detail.Connection": "Cluster connection", + "Dashboard.Tenant.Detail.Connection1": "Connection", + "Dashboard.Cluster.Detail.CloseConnection": "Connection closed", + "Dashboard.Cluster.Detail.NotRunning": "Cluster is not running", + "Dashboard.Cluster.Detail.CreateConnection": "Create connection", + "Dashboard.components.Terminal.Disconnect": "Disconnect", + "Dashboard.components.Terminal.Disconnect1": "Are you sure you want to disconnect?", + "Dashboard.Tenant.Detail.Connection": "Tenant connection", + "Dashboard.Cluster.Detail.AbnormalOperation": "Tenant is not functioning properly" } diff --git a/ui/src/i18n/strings/zh-CN.json b/ui/src/i18n/strings/zh-CN.json index 436474a21..7adb32805 100644 --- a/ui/src/i18n/strings/zh-CN.json +++ b/ui/src/i18n/strings/zh-CN.json @@ -695,5 +695,15 @@ "Dashboard.Detail.Overview.BasicInfo.ClusterInformation": "集群信息", "Dashboard.Detail.Overview.ServerTable.ServerList": "Server 列表", "Dashboard.Detail.Overview.ZoneTable.ZoneList": "Zone 列表", - "Dashboard.Detail.NewBackup.AdvancedConfiguration.Days": "天" + "Dashboard.Detail.NewBackup.AdvancedConfiguration.Days": "天", + "Dashboard.Cluster.Detail.Connection": "集群连接", + "Dashboard.Cluster.Detail.Connection1": "集群连接", + "Dashboard.Tenant.Detail.Connection1": "连接租户", + "Dashboard.Cluster.Detail.CloseConnection": "连接已关闭", + "Dashboard.Cluster.Detail.NotRunning": "集群未运行", + "Dashboard.Cluster.Detail.CreateConnection": "创建连接", + "Dashboard.components.Terminal.Disconnect": "断开连接", + "Dashboard.components.Terminal.Disconnect1": "确定要断开连接吗?", + "Dashboard.Tenant.Detail.Connection": "连接租户", + "Dashboard.Cluster.Detail.AbnormalOperation": "租户未正常运行" } diff --git a/ui/src/pages/Cluster/Detail/Connection/index.tsx b/ui/src/pages/Cluster/Detail/Connection/index.tsx index e95b95c28..0eab810c5 100644 --- a/ui/src/pages/Cluster/Detail/Connection/index.tsx +++ b/ui/src/pages/Cluster/Detail/Connection/index.tsx @@ -1,40 +1,45 @@ -import React, { useState } from 'react' -import { PageContainer } from '@ant-design/pro-components' -import { intl } from '@/utils/intl' -import { OBTerminal } from '@/components/Terminal/terminal' -import { Button, Row, message } from 'antd' -import { request, useParams } from '@umijs/max' -import { useRequest } from 'ahooks' -import { getClusterDetailReq } from '@/services' -import BasicInfo from '../Overview/BasicInfo' - +import { OBTerminal } from '@/components/Terminal/terminal'; +import { getClusterDetailReq } from '@/services'; +import { intl } from '@/utils/intl'; +import { PageContainer } from '@ant-design/pro-components'; +import { request, useParams } from '@umijs/max'; +import { useRequest } from 'ahooks'; +import { Button, Row, message } from 'antd'; +import React, { useState } from 'react'; +import BasicInfo from '../Overview/BasicInfo'; const ClusterConnection: React.FC = () => { const header = () => { return { title: intl.formatMessage({ - id: 
'dashboard.Cluster.Detail.Connection', + id: 'Dashboard.Cluster.Detail.Connection', defaultMessage: '集群连接', - }) - } - } - const {ns, name} = useParams(); + }), + }; + }; + const { ns, name } = useParams(); const { data: clusterDetail } = useRequest(getClusterDetailReq, { defaultParams: [{ name: name!, ns: ns! }], - }) + }); - const { runAsync } = useRequest(async (): Promise<{ - data: { terminalId: string } - }> => { - return request(`/api/v1/obclusters/namespace/${ns}/name/${name}/terminal`, { - method: 'PUT' - }) - }, { - manual: true - }) + const { runAsync } = useRequest( + async (): Promise<{ + data: { terminalId: string }; + }> => { + return request( + `/api/v1/obclusters/namespace/${ns}/name/${name}/terminal`, + { + method: 'PUT', + }, + ); + }, + { + manual: true, + }, + ); - const [terminalId, setTerminalId] = useState() + const [terminalId, setTerminalId] = useState(); return ( @@ -46,28 +51,50 @@ const ClusterConnection: React.FC = () => { {clusterDetail && ( )} -
+
{terminalId ? ( - { - setTerminalId(undefined) - message.info('连接已关闭') - }} /> + { + setTerminalId(undefined); + message.info( + intl.formatMessage({ + id: 'Dashboard.Cluster.Detail.CloseConnection', + defaultMessage: '连接已关闭', + }), + ); + }} + /> ) : ( - + )}
- ) -} + ); +}; -export default ClusterConnection +export default ClusterConnection; diff --git a/ui/src/pages/Cluster/Detail/index.tsx b/ui/src/pages/Cluster/Detail/index.tsx index 9f406b9d2..51260efba 100644 --- a/ui/src/pages/Cluster/Detail/index.tsx +++ b/ui/src/pages/Cluster/Detail/index.tsx @@ -87,7 +87,7 @@ const ClusterDetail: React.FC = () => { }, { title: intl.formatMessage({ - id: 'Dashboard.Cluster.Detail.Connection', + id: 'Dashboard.Cluster.Detail.Connection1', defaultMessage: '连接集群', }), key: 'connection', diff --git a/ui/src/pages/Tenant/Detail/Connection/index.tsx b/ui/src/pages/Tenant/Detail/Connection/index.tsx index 7d9c9016b..8523e0837 100644 --- a/ui/src/pages/Tenant/Detail/Connection/index.tsx +++ b/ui/src/pages/Tenant/Detail/Connection/index.tsx @@ -1,45 +1,51 @@ -import React, { useEffect, useState } from 'react' -import { PageContainer } from '@ant-design/pro-components' -import { intl } from '@/utils/intl' -import { OBTerminal } from '@/components/Terminal/terminal' -import { Button, Row, message } from 'antd' -import { request, useParams } from '@umijs/max' -import { useRequest } from 'ahooks' -import BasicInfo from '../Overview/BasicInfo' -import { getTenant } from '@/services/tenant' - +import { OBTerminal } from '@/components/Terminal/terminal'; +import { getTenant } from '@/services/tenant'; +import { intl } from '@/utils/intl'; +import { PageContainer } from '@ant-design/pro-components'; +import { request, useParams } from '@umijs/max'; +import { useRequest } from 'ahooks'; +import { Button, Row, message } from 'antd'; +import React, { useEffect, useState } from 'react'; +import BasicInfo from '../Overview/BasicInfo'; const TenantConnection: React.FC = () => { const header = () => { return { title: intl.formatMessage({ - id: 'dashboard.Tenant.Detail.Connection', + id: 'Dashboard.Tenant.Detail.Connection', defaultMessage: '连接租户', - }) - } - } + }), + }; + }; - const {ns, name} = useParams(); + const { ns, name } = useParams(); - const { data: tenantDetailResponse, run: getTenantDetail, loading } = useRequest(getTenant, { + const { + data: tenantDetailResponse, + run: getTenantDetail, + loading, + } = useRequest(getTenant, { manual: true, }); - const { runAsync } = useRequest(async (): Promise<{ - data: { terminalId: string } - }> => { - return request(`/api/v1/obtenants/${ns}/${name}/terminal`, { - method: 'PUT' - }) - }, { - manual: true - }) + const { runAsync } = useRequest( + async (): Promise<{ + data: { terminalId: string }; + }> => { + return request(`/api/v1/obtenants/${ns}/${name}/terminal`, { + method: 'PUT', + }); + }, + { + manual: true, + }, + ); useEffect(() => { getTenantDetail({ ns: ns!, name: name! }); }, []); - const [terminalId, setTerminalId] = useState() + const [terminalId, setTerminalId] = useState(); const tenantDetail = tenantDetailResponse?.data; @@ -51,30 +57,54 @@ const TenantConnection: React.FC = () => { /> {tenantDetail && ( - + )} -
+
{terminalId ? ( - { - setTerminalId(undefined) - message.info('连接已关闭') - }} /> + { + setTerminalId(undefined); + message.info( + intl.formatMessage({ + id: 'Dashboard.Cluster.Detail.CloseConnection', + defaultMessage: '连接已关闭', + }), + ); + }} + /> ) : ( - + )}
- ) -} + ); +}; -export default TenantConnection +export default TenantConnection; diff --git a/ui/src/pages/Tenant/Detail/index.tsx b/ui/src/pages/Tenant/Detail/index.tsx index 801c6c9e9..a3787ffa2 100644 --- a/ui/src/pages/Tenant/Detail/index.tsx +++ b/ui/src/pages/Tenant/Detail/index.tsx @@ -85,7 +85,7 @@ const TenantDetail: React.FC = () => { }, { title: intl.formatMessage({ - id: 'Dashboard.Tenant.Detail.Connection', + id: 'Dashboard.Tenant.Detail.Connection1', defaultMessage: '连接租户', }), key: 'connection',
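A note on the retry backoff wired up in pkg/coordinator above: with the defaults in the new config.go (ExecutionRequeueDuration of 1s, TaskRetryBackoffThreshold of 16), the requeue delay for a failed task doubles with each retry and caps at 2^16 seconds, roughly 18 hours. A minimal, self-contained sketch of that arithmetic; `minInt` stands in for the `Min` helper used in coordinator.go:

```go
package main

import (
	"fmt"
	"time"
)

// Defaults mirrored from pkg/coordinator/config.go.
const (
	executionRequeueDuration = 1 * time.Second
	retryBackoffThreshold    = 16
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	// RequeueAfter = ExecutionRequeueDuration * 2^min(retryCount, threshold):
	// exponential backoff whose exponent is capped by the threshold.
	for _, retryCount := range []int{1, 4, 8, 16, 32} {
		backoff := executionRequeueDuration << uint(minInt(retryCount, retryBackoffThreshold))
		fmt.Printf("retry %2d -> requeue after %v\n", retryCount, backoff)
	}
}
```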
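The setters at the bottom of the new config.go are the only public mutation points for these knobs. A hypothetical wiring sketch for an operator entrypoint follows; the flag names and `main()` are illustrative assumptions, while `SetMaxRetryTimes` and `SetRetryBackoffThreshold` are the functions added in this diff:

```go
package main

import (
	"flag"

	"github.com/oceanbase/ob-operator/pkg/coordinator"
)

func main() {
	// Hypothetical flags; defaults match the values in pkg/coordinator/config.go.
	maxRetry := flag.Int("task-max-retry-times", 99, "maximum retries for a failed task")
	backoffThreshold := flag.Int("task-retry-backoff-threshold", 16, "cap on the retry backoff exponent")
	flag.Parse()

	coordinator.SetMaxRetryTimes(*maxRetry)
	coordinator.SetRetryBackoffThreshold(*backoffThreshold)

	// ... controller-runtime manager setup would follow here.
}
```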
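Finally, a plausible call site for the CheckTenantLSIntegrity helper added at the top of this section. The package path (internal/resource/utils, inferred from the file deleted alongside it) and the surrounding function are assumptions; the helper itself accepts tenantCR either as a bare CR name resolved in ns, or as "ns/name", in which case ns is ignored:

```go
package guard // hypothetical call site, not part of this diff

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	resourceutils "github.com/oceanbase/ob-operator/internal/resource/utils"
)

// ensureTenantLogIntact blocks an operation (e.g. activating a standby) when
// the tenant's log streams show deletions or a non-zero begin LSN.
func ensureTenantLogIntact(ctx context.Context, clt client.Client, logger logr.Logger) error {
	// "oceanbase/standby-tenant" carries its own namespace, so the ns
	// argument ("default") is ignored by the helper.
	if err := resourceutils.CheckTenantLSIntegrity(ctx, clt, &logger, "default", "oceanbase/standby-tenant"); err != nil {
		return errors.Wrap(err, "tenant LS integrity check failed")
	}
	return nil
}
```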