diff --git a/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscale.yaml b/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscale.yaml new file mode 100644 index 0000000000..d7cf7c1c11 --- /dev/null +++ b/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscale.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pgbouncer-autoscale + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + authType: md5 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscaler.yaml b/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscaler.yaml new file mode 100644 index 0000000000..37df8e5a77 --- /dev/null +++ b/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscaler.yaml @@ -0,0 +1,21 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: PgBouncerAutoscaler +metadata: + name: pgbouncer-autoscale-ops + namespace: demo +spec: + databaseRef: + name: pgbouncer-autoscale + compute: + pgbouncer: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 20 + minAllowed: + cpu: 400m + memory: 400Mi + maxAllowed: + cpu: 1 + memory: 1Gi + controlledResources: ["cpu", "memory"] + containerControlledValues: "RequestsAndLimits" \ No newline at end of file diff --git a/docs/examples/pgbouncer/reconfigure/pb-custom-config.yaml b/docs/examples/pgbouncer/reconfigure/pb-custom-config.yaml new file mode 100644 index 0000000000..1851b3492d --- /dev/null +++ b/docs/examples/pgbouncer/reconfigure/pb-custom-config.yaml @@ -0,0 +1,22 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pb-custom + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/reconfigure/pbops-reconfigure-apply.yaml b/docs/examples/pgbouncer/reconfigure/pbops-reconfigure-apply.yaml new file mode 100644 index 0000000000..867d1a4393 --- /dev/null +++ b/docs/examples/pgbouncer/reconfigure/pbops-reconfigure-apply.yaml @@ -0,0 +1,17 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pbops-reconfigure-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: pb-custom + configuration: + pgbouncer: + applyConfig: + pgbouncer.ini: |- + [pgbouncer] + auth_type=scram-sha-256 + timeout: 5m + apply: IfReady \ No newline at end of file diff --git a/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml b/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml new file mode 100644 index 0000000000..705f385a0f --- /dev/null +++ b/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml @@ -0,0 +1,15 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pbops-reconfigure + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: pb-custom + configuration: + pgbouncer: + configSecret: + name: new-custom-config + timeout: 5m + apply: IfReady \ No newline at end of file diff --git 
a/docs/examples/pgbouncer/restart/ops.yaml b/docs/examples/pgbouncer/restart/ops.yaml new file mode 100644 index 0000000000..cff7b49016 --- /dev/null +++ b/docs/examples/pgbouncer/restart/ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: restart-pgbouncer + namespace: demo +spec: + type: Restart + databaseRef: + name: pgbouncer + timeout: 3m + apply: Always \ No newline at end of file diff --git a/docs/examples/pgbouncer/restart/pgbouncer.yaml b/docs/examples/pgbouncer/restart/pgbouncer.yaml new file mode 100644 index 0000000000..30d3b3cfc4 --- /dev/null +++ b/docs/examples/pgbouncer/restart/pgbouncer.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pgbouncer + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + authType: md5 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/scaling/horizontal-scaling-down-ops.yaml b/docs/examples/pgbouncer/scaling/horizontal-scaling-down-ops.yaml new file mode 100644 index 0000000000..7447bb29e8 --- /dev/null +++ b/docs/examples/pgbouncer/scaling/horizontal-scaling-down-ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-horizontal-scale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: pb-horizontal + horizontalScaling: + replicas: 2 \ No newline at end of file diff --git a/docs/examples/pgbouncer/scaling/horizontal-scaling-ops.yaml b/docs/examples/pgbouncer/scaling/horizontal-scaling-ops.yaml new file mode 100644 index 0000000000..75dc3397f1 --- /dev/null +++ b/docs/examples/pgbouncer/scaling/horizontal-scaling-ops.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-horizontal-scale-up + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: pb-horizontal + horizontalScaling: + replicas: 3 \ No newline at end of file diff --git a/docs/examples/pgbouncer/scaling/pb-horizontal.yaml b/docs/examples/pgbouncer/scaling/pb-horizontal.yaml new file mode 100644 index 0000000000..512304f102 --- /dev/null +++ b/docs/examples/pgbouncer/scaling/pb-horizontal.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pb-horizontal + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + authType: md5 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/scaling/pb-vertical-ops.yaml b/docs/examples/pgbouncer/scaling/pb-vertical-ops.yaml new file mode 100644 index 0000000000..cf24a4b110 --- /dev/null +++ b/docs/examples/pgbouncer/scaling/pb-vertical-ops.yaml @@ -0,0 +1,20 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-scale-vertical + namespace: demo +spec: + type: VerticalScaling + databaseRef: + name: pb-vertical + verticalScaling: + pgbouncer: + resources: + requests: + memory: "2Gi" + cpu: "1" + limits: + memory: "2Gi" + cpu: "1" + timeout: 5m + 
apply: IfReady \ No newline at end of file diff --git a/docs/examples/pgbouncer/scaling/pb-vertical.yaml b/docs/examples/pgbouncer/scaling/pb-vertical.yaml new file mode 100644 index 0000000000..97c25040b9 --- /dev/null +++ b/docs/examples/pgbouncer/scaling/pb-vertical.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pb-vertical + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + authType: md5 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/sync-users/pgbouncer-sync.yaml b/docs/examples/pgbouncer/sync-users/pgbouncer-sync.yaml new file mode 100644 index 0000000000..8aae37078b --- /dev/null +++ b/docs/examples/pgbouncer/sync-users/pgbouncer-sync.yaml @@ -0,0 +1,15 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pgbouncer-sync + namespace: demo +spec: + version: "1.23.1" + replicas: 1 + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/sync-users/secret.yaml b/docs/examples/pgbouncer/sync-users/secret.yaml new file mode 100644 index 0000000000..f4b0e86148 --- /dev/null +++ b/docs/examples/pgbouncer/sync-users/secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/instance: ha-postgres + app.kubernetes.io/name: postgreses.kubedb.com + name: sync-secret + namespace: demo +stringData: + password: "12345" + username: "john" \ No newline at end of file diff --git a/docs/examples/pgbouncer/update-version/pb-update.yaml b/docs/examples/pgbouncer/update-version/pb-update.yaml new file mode 100644 index 0000000000..c5128d899d --- /dev/null +++ b/docs/examples/pgbouncer/update-version/pb-update.yaml @@ -0,0 +1,23 @@ +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pb-update + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + authType: md5 + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/pgbouncer/update-version/pbops-update.yaml b/docs/examples/pgbouncer/update-version/pbops-update.yaml new file mode 100644 index 0000000000..aef1210119 --- /dev/null +++ b/docs/examples/pgbouncer/update-version/pbops-update.yaml @@ -0,0 +1,11 @@ +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-version-update + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: pb-update + updateVersion: + targetVersion: 1.23.1 \ No newline at end of file diff --git a/docs/guides/pgbouncer/README.md b/docs/guides/pgbouncer/README.md index c3ed534ed4..b64cf5f4bb 100644 --- a/docs/guides/pgbouncer/README.md +++ b/docs/guides/pgbouncer/README.md @@ -23,14 +23,21 @@ KubeDB operator now comes bundled with PgBouncer crd to handle connection poolin ## PgBouncer Features -| Features | Availability | -|------------------------------------|:------------:| -| Clustering | ✓ | -| Multiple PgBouncer Versions | ✓ | -| Customizable Pooling Configuration 
| ✓ |
-| Custom docker images | ✓ |
-| Builtin Prometheus Discovery | ✓ |
-| Using Prometheus operator | ✓ |
+| Features | Availability |
+|-------------------------------------------------------------| :----------: |
+| Multiple PgBouncer Versions | ✓ |
+| Custom Configuration | ✓ |
+| Externally manageable Auth Secret | ✓ |
+| Reconfigurable Health Checker | ✓ |
+| Integrate with externally managed PostgreSQL | ✓ |
+| Sync Postgres Users to PgBouncer | ✓ |
+| Custom docker images | ✓ |
+| TLS: Add via [Cert Manager](https://cert-manager.io/docs/) | ✓ |
+| Monitoring with Prometheus & Grafana | ✓ |
+| Builtin Prometheus Discovery | ✓ |
+| Using Prometheus operator | ✓ |
+| Alert Dashboard | ✓ |
+| Grafana Dashboard | ✓ |
 
 ## User Guide
diff --git a/docs/guides/pgbouncer/autoscaler/_index.md b/docs/guides/pgbouncer/autoscaler/_index.md
new file mode 100644
index 0000000000..089e262d65
--- /dev/null
+++ b/docs/guides/pgbouncer/autoscaler/_index.md
@@ -0,0 +1,10 @@
+---
+title: Autoscaling
+menu:
+  docs_{{ .version }}:
+    identifier: pb-auto-scaling
+    name: Autoscaling
+    parent: pb-pgbouncer-guides
+    weight: 46
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/pgbouncer/autoscaler/compute/_index.md b/docs/guides/pgbouncer/autoscaler/compute/_index.md
new file mode 100644
index 0000000000..67db417dd2
--- /dev/null
+++ b/docs/guides/pgbouncer/autoscaler/compute/_index.md
@@ -0,0 +1,10 @@
+---
+title: Compute Autoscaling
+menu:
+  docs_{{ .version }}:
+    identifier: pb-compute-auto-scaling
+    name: Compute Autoscaling
+    parent: pb-auto-scaling
+    weight: 10
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/pgbouncer/autoscaler/compute/compute-autoscale.md b/docs/guides/pgbouncer/autoscaler/compute/compute-autoscale.md
new file mode 100644
index 0000000000..a137dfa447
--- /dev/null
+++ b/docs/guides/pgbouncer/autoscaler/compute/compute-autoscale.md
@@ -0,0 +1,434 @@
+---
+title: PgBouncer Autoscaling
+menu:
+  docs_{{ .version }}:
+    identifier: pb-auto-scaling-pgbouncer
+    name: pgbouncerCompute
+    parent: pb-compute-auto-scaling
+    weight: 15
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Autoscaling the Compute Resource of a PgBouncer
+
+This guide will show you how to use `KubeDB` to autoscale the compute resources, i.e. cpu and memory, of a PgBouncer.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster.
+
+- Install `KubeDB` Provisioner, Ops-manager, and Autoscaler operators in your cluster following the steps [here](/docs/setup/README.md).
+
+- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerAutoscaler](/docs/guides/pgbouncer/concepts/autoscaler.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+  - [Compute Resource Autoscaling Overview](/docs/guides/pgbouncer/autoscaler/compute/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in the [docs/examples/pgbouncer](/docs/examples/pgbouncer) directory of the [kubedb/docs](https://github.com/kubedb/docs) repository.
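+
+Optionally, you can sanity-check that the KubeDB CRDs are registered in the cluster before moving on. This is a quick, generic check; the `grep` pattern simply matches the KubeDB API groups, and the exact list you see will depend on the installed KubeDB version:
+
+```bash
+$ kubectl get crd | grep kubedb.com
+```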
+
+## Autoscaling of PgBouncer
+
+### Prepare Postgres
+Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use a 3-node Postgres cluster named `ha-postgres`.
+
+Here, we are going to deploy a standalone `PgBouncer` using a version supported by the `KubeDB` operator. Then we are going to apply a `PgBouncerAutoscaler` to set up autoscaling.
+
+#### Deploy PgBouncer
+
+In this section, we are going to deploy a PgBouncer with version `1.18.0`. Then, in the next section, we will set up autoscaling for this pgbouncer using the `PgBouncerAutoscaler` CRD. Below is the YAML of the `PgBouncer` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: PgBouncer
+metadata:
+  name: pgbouncer-autoscale
+  namespace: demo
+spec:
+  replicas: 1
+  version: "1.18.0"
+  database:
+    syncUsers: true
+    databaseName: "postgres"
+    databaseRef:
+      name: "ha-postgres"
+      namespace: demo
+  connectionPool:
+    poolMode: session
+    port: 5432
+    reservePoolSize: 5
+    maxClientConnections: 87
+    defaultPoolSize: 2
+    minPoolSize: 1
+    authType: md5
+  deletionPolicy: WipeOut
+```
+
+Let's create the `PgBouncer` CRO we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscale.yaml
+pgbouncer.kubedb.com/pgbouncer-autoscale created
+```
+
+Now, wait until `pgbouncer-autoscale` has status `Ready`, i.e.,
+
+```bash
+$ kubectl get pb -n demo
+NAME                  TYPE            VERSION   STATUS   AGE
+pgbouncer-autoscale   kubedb.com/v1   1.18.0    Ready    22s
+```
+
+Let's check the Pod's container resources,
+
+```bash
+$ kubectl get pod -n demo pgbouncer-autoscale-0 -o json | jq '.spec.containers[].resources'
+{
+  "limits": {
+    "memory": "300Mi"
+  },
+  "requests": {
+    "cpu": "200m",
+    "memory": "300Mi"
+  }
+}
+```
+
+Let's check the PgBouncer resources,
+```bash
+$ kubectl get pgbouncer -n demo pgbouncer-autoscale -o json | jq '.spec.podTemplate.spec.containers[0].resources'
+{
+  "limits": {
+    "memory": "300Mi"
+  },
+  "requests": {
+    "cpu": "200m",
+    "memory": "300Mi"
+  }
+}
+```
+
+You can see from the above outputs that the resources are the same as the ones we assigned while deploying the pgbouncer.
+
+We are now ready to apply the `PgBouncerAutoscaler` CRO to set up autoscaling for this database.
+
+### Compute Resource Autoscaling
+
+Here, we are going to set up compute (cpu and memory) autoscaling using a PgBouncerAutoscaler Object.
+
+#### Create PgBouncerAutoscaler Object
+
+In order to set up compute resource autoscaling for this pgbouncer, we have to create a `PgBouncerAutoscaler` CRO with our desired configuration. Below is the YAML of the `PgBouncerAutoscaler` object that we are going to create,
+
+```yaml
+apiVersion: autoscaling.kubedb.com/v1alpha1
+kind: PgBouncerAutoscaler
+metadata:
+  name: pgbouncer-autoscale-ops
+  namespace: demo
+spec:
+  databaseRef:
+    name: pgbouncer-autoscale
+  compute:
+    pgbouncer:
+      trigger: "On"
+      podLifeTimeThreshold: 5m
+      resourceDiffPercentage: 20
+      minAllowed:
+        cpu: 400m
+        memory: 400Mi
+      maxAllowed:
+        cpu: 1
+        memory: 1Gi
+      controlledResources: ["cpu", "memory"]
+      containerControlledValues: "RequestsAndLimits"
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing compute resource autoscaling on `pgbouncer-autoscale`.
+- `spec.compute.pgbouncer.trigger` specifies that compute resource autoscaling is enabled for this pgbouncer.
+- `spec.compute.pgbouncer.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pods to initiate a vertical scaling.
+- `spec.compute.pgbouncer.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%.
+  If the difference between the current & recommended resources is less than `resourceDiffPercentage`, the Autoscaler operator will ignore the update.
+- `spec.compute.pgbouncer.minAllowed` specifies the minimum allowed resources for this pgbouncer.
+- `spec.compute.pgbouncer.maxAllowed` specifies the maximum allowed resources for this pgbouncer.
+- `spec.compute.pgbouncer.controlledResources` specifies the resources that are controlled by the autoscaler.
+- `spec.compute.pgbouncer.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits".
+- `spec.opsRequestOptions` contains the options to pass to the created OpsRequest. It has 2 fields. Know more about them here: [timeout](/docs/guides/pgbouncer/concepts/opsrequest.md#spectimeout), [apply](/docs/guides/pgbouncer/concepts/opsrequest.md#specapply).
+
+Let's create the `PgBouncerAutoscaler` CR we have shown above,
+
+```bash
+$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/autoscaling/compute/pgbouncer-autoscaler.yaml
+pgbouncerautoscaler.autoscaling.kubedb.com/pgbouncer-autoscale-ops created
+```
+
+#### Verify Autoscaling is set up successfully
+
+Let's check that the `pgbouncerautoscaler` resource is created successfully,
+
+```bash
+$ kubectl get pgbouncerautoscaler -n demo
+NAME                      AGE
+pgbouncer-autoscale-ops   6m55s
+
+$ kubectl describe pgbouncerautoscaler pgbouncer-autoscale-ops -n demo
+Name:         pgbouncer-autoscale-ops
+Namespace:    demo
+Labels:       <none>
+Annotations:  <none>
+API Version:  autoscaling.kubedb.com/v1alpha1
+Kind:         PgBouncerAutoscaler
+Metadata:
+  Creation Timestamp:  2024-07-17T12:09:17Z
+  Generation:          1
+  Resource Version:    81569
+  UID:                 3841c30b-3b19-4740-82f5-bf8e257ddc18
+Spec:
+  Compute:
+    PgBouncer:
+      Container Controlled Values:  RequestsAndLimits
+      Controlled Resources:
+        cpu
+        memory
+      Max Allowed:
+        Cpu:     1
+        Memory:  1Gi
+      Min Allowed:
+        Cpu:                     400m
+        Memory:                  400Mi
+      Pod Life Time Threshold:   5m0s
+      Resource Diff Percentage:  20
+      Trigger:                   On
+  Database Ref:
+    Name:  pgbouncer-autoscale
+  Ops Request Options:
+    Apply:  IfReady
+Status:
+  Checkpoints:
+    Cpu Histogram:
+      Bucket Weights:
+        Index:              0
+        Weight:             10000
+      Reference Timestamp:  2024-07-17T12:10:00Z
+      Total Weight:         0.8733542386168607
+    First Sample Start:     2024-07-17T12:09:14Z
+    Last Sample Start:      2024-07-17T12:15:06Z
+    Last Update Time:       2024-07-17T12:15:38Z
+    Memory Histogram:
+      Bucket Weights:
+        Index:              11
+        Weight:             10000
+      Reference Timestamp:  2024-07-17T12:15:00Z
+      Total Weight:         0.7827734162991002
+    Ref:
+      Container Name:     pgbouncer
+      Vpa Object Name:    pgbouncer-autoscale
+    Total Samples Count:  6
+    Version:              v3
+  Conditions:
+    Last Transition Time:  2024-07-17T12:10:37Z
+    Message:               Successfully created PgBouncerOpsRequest demo/pbops-pgbouncer-autoscale-zzell6
+    Observed Generation:   1
+    Reason:                CreateOpsRequest
+    Status:                True
+    Type:                  CreateOpsRequest
+  Vpas:
+    Conditions:
+      Last Transition Time:  2024-07-17T12:09:37Z
+      Status:                True
+      Type:                  RecommendationProvided
+    Recommendation:
+      Container Recommendations:
+        Container Name:  pgbouncer
+        Lower Bound:
+          Cpu:     400m
+          Memory:  400Mi
+        Target:
+          Cpu:     400m
+          Memory:  400Mi
+        Uncapped Target:
+          Cpu:     100m
+          Memory:  262144k
+        Upper Bound:
+          Cpu:     1
+          Memory:  1Gi
+    Vpa Name:  pgbouncer-autoscale
+Events:        <none>
+```
+So, the `pgbouncerautoscaler` resource is created successfully.
+
+You can see in the `Status.Vpas.Recommendation` section that a recommendation has been generated for our pgbouncer. The autoscaler operator continuously watches the generated recommendation and creates a `pgbounceropsrequest` based on it, if the pgbouncer pods need to be scaled up or down.
+
+Let's watch the `pgbounceropsrequest` in the demo namespace to see if any `pgbounceropsrequest` object is created. After some time you'll see that a `pgbounceropsrequest` will be created based on the recommendation.
+
+```bash
+$ watch kubectl get pgbounceropsrequest -n demo
+Every 2.0s: kubectl get pgbounceropsrequest -n demo
+NAME                               TYPE              STATUS        AGE
+pbops-pgbouncer-autoscale-zzell6   VerticalScaling   Progressing   1m48s
+```
+
+Let's wait for the ops request to become successful.
+
+```bash
+$ watch kubectl get pgbounceropsrequest -n demo
+Every 2.0s: kubectl get pgbounceropsrequest -n demo
+NAME                               TYPE              STATUS       AGE
+pbops-pgbouncer-autoscale-zzell6   VerticalScaling   Successful   3m40s
+```
+
+We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest`, we will get an overview of the steps that were followed to scale the pgbouncer.
+
+```bash
+$ kubectl describe pgbounceropsrequest -n demo pbops-pgbouncer-autoscale-zzell6
+Name:         pbops-pgbouncer-autoscale-zzell6
+Namespace:    demo
+Labels:       app.kubernetes.io/component=connection-pooler
+              app.kubernetes.io/instance=pgbouncer-autoscale
+              app.kubernetes.io/managed-by=kubedb.com
+              app.kubernetes.io/name=pgbouncers.kubedb.com
+Annotations:  <none>
+API Version:  ops.kubedb.com/v1alpha1
+Kind:         PgBouncerOpsRequest
+Metadata:
+  Creation Timestamp:  2024-07-17T12:10:37Z
+  Generation:          1
+  Owner References:
+    API Version:           autoscaling.kubedb.com/v1alpha1
+    Block Owner Deletion:  true
+    Controller:            true
+    Kind:                  PgBouncerAutoscaler
+    Name:                  pgbouncer-autoscale-ops
+    UID:                   3841c30b-3b19-4740-82f5-bf8e257ddc18
+  Resource Version:        81200
+  UID:                     57f99d31-af3d-4157-aa61-0f509ec89bbd
+Spec:
+  Apply:  IfReady
+  Database Ref:
+    Name:  pgbouncer-autoscale
+  Type:    VerticalScaling
+  Vertical Scaling:
+    Node:
+      Resources:
+        Limits:
+          Cpu:     400m
+          Memory:  400Mi
+        Requests:
+          Cpu:     400m
+          Memory:  400Mi
+Status:
+  Conditions:
+    Last Transition Time:  2024-07-17T12:10:37Z
+    Message:               PgBouncer ops-request has started to vertically scaling the PgBouncer nodes
+    Observed Generation:   1
+    Reason:                VerticalScaling
+    Status:                True
+    Type:                  VerticalScaling
+    Last Transition Time:  2024-07-17T12:10:40Z
+    Message:               Successfully paused database
+    Observed Generation:   1
+    Reason:                DatabasePauseSucceeded
+    Status:                True
+    Type:                  DatabasePauseSucceeded
+    Last Transition Time:  2024-07-17T12:10:40Z
+    Message:               Successfully updated PetSets Resources
+    Observed Generation:   1
+    Reason:                UpdatePetSets
+    Status:                True
+    Type:                  UpdatePetSets
+    Last Transition Time:  2024-07-17T12:11:25Z
+    Message:               Successfully Restarted Pods With Resources
+    Observed Generation:   1
+    Reason:                RestartPods
+    Status:                True
+    Type:                  RestartPods
+    Last Transition Time:  2024-07-17T12:10:45Z
+    Message:               get pod; ConditionStatus:True; PodName:pgbouncer-autoscale-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  GetPod--pgbouncer-autoscale-0
+    Last Transition Time:  2024-07-17T12:10:45Z
+    Message:               evict pod; ConditionStatus:True; PodName:pgbouncer-autoscale-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  EvictPod--pgbouncer-autoscale-0
+    Last Transition Time:  2024-07-17T12:11:20Z
+    Message:               check pod running; ConditionStatus:True; PodName:pgbouncer-autoscale-0
+    Observed Generation:   1
+    Status:                True
+    Type:                  CheckPodRunning--pgbouncer-autoscale-0
+    Last Transition Time:  2024-07-17T12:11:26Z
+    Message:               Successfully completed the vertical scaling for PgBouncer
+    Observed Generation:   1
+    Reason:                Successful
+    Status:                True
+    Type:                  Successful
+  Observed Generation:     1
+  Phase:                   Successful
+Events:
+  Type     Reason     Age    From                         Message
+  ----     ------     ----   ----                         -------
+  Normal   Starting   8m19s  KubeDB Ops-manager Operator  Start processing for PgBouncerOpsRequest: demo/pbops-pgbouncer-autoscale-zzell6
+  Normal   Starting   8m19s  KubeDB Ops-manager Operator  Pausing PgBouncer databse: demo/pgbouncer-autoscale
+  Normal   Successful  8m19s  KubeDB Ops-manager Operator  Successfully paused PgBouncer database: demo/pgbouncer-autoscale for PgBouncerOpsRequest: pbops-pgbouncer-autoscale-zzell6
+  Normal   UpdatePetSets  8m16s  KubeDB Ops-manager Operator  Successfully updated PetSets Resources
+  Warning  get pod; ConditionStatus:True; PodName:pgbouncer-autoscale-0  8m11s  KubeDB Ops-manager Operator  get pod; ConditionStatus:True; PodName:pgbouncer-autoscale-0
+  Warning  evict pod; ConditionStatus:True; PodName:pgbouncer-autoscale-0  8m11s  KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:pgbouncer-autoscale-0
+  Warning  check pod running; ConditionStatus:False; PodName:pgbouncer-autoscale-0  8m6s  KubeDB Ops-manager Operator  check pod running; ConditionStatus:False; PodName:pgbouncer-autoscale-0
+  Warning  check pod running; ConditionStatus:True; PodName:pgbouncer-autoscale-0  7m36s  KubeDB Ops-manager Operator  check pod running; ConditionStatus:True; PodName:pgbouncer-autoscale-0
+  Normal   RestartPods  7m31s  KubeDB Ops-manager Operator  Successfully Restarted Pods With Resources
+  Normal   Starting   7m31s  KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pgbouncer-autoscale
+  Normal   Successful  7m30s  KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pgbouncer-autoscale for PgBouncerOpsRequest: pbops-pgbouncer-autoscale-zzell6
+```
+
+Now, we are going to verify from the Pod and the PgBouncer YAML whether the resources of the pgbouncer have been updated to match the desired state. Let's check,
+
+```bash
+$ kubectl get pod -n demo pgbouncer-autoscale-0 -o json | jq '.spec.containers[].resources'
+{
+  "limits": {
+    "memory": "400Mi"
+  },
+  "requests": {
+    "cpu": "400m",
+    "memory": "400Mi"
+  }
+}
+
+$ kubectl get pgbouncer -n demo pgbouncer-autoscale -o json | jq '.spec.podTemplate.spec.containers[0].resources'
+{
+  "limits": {
+    "memory": "400Mi"
+  },
+  "requests": {
+    "cpu": "400m",
+    "memory": "400Mi"
+  }
+}
+```
+
+The above output verifies that we have successfully auto-scaled the resources of the PgBouncer.
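+
+If you want to inspect the raw recommendation the autoscaler is acting on, you can also read it straight from the `PgBouncerAutoscaler` status. The JSON field paths below are inferred from the `kubectl describe` output shown earlier, so treat this as a convenience sketch rather than a guaranteed interface:
+
+```bash
+$ kubectl get pgbouncerautoscaler -n demo pgbouncer-autoscale-ops \
+    -o jsonpath='{.status.vpas[0].recommendation.containerRecommendations[0].target}'
+{"cpu":"400m","memory":"400Mi"}
+```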
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete pb -n demo pgbouncer-autoscale +kubectl delete pgbouncerautoscaler -n demo pgbouncer-autoscale-ops +``` \ No newline at end of file diff --git a/docs/guides/pgbouncer/autoscaler/compute/overview.md b/docs/guides/pgbouncer/autoscaler/compute/overview.md new file mode 100644 index 0000000000..ea2848cf82 --- /dev/null +++ b/docs/guides/pgbouncer/autoscaler/compute/overview.md @@ -0,0 +1,55 @@ +--- +title: PgBouncer Compute Autoscaling Overview +menu: + docs_{{ .version }}: + identifier: pb-auto-scaling-overview + name: Overview + parent: pb-compute-auto-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# PgBouncer Compute Resource Autoscaling + +This guide will give an overview on how KubeDB Autoscaler operator autoscales the database compute resources i.e. cpu and memory using `pgbouncerautoscaler` crd. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) + - [PgBouncerAutoscaler](/docs/guides/pgbouncer/concepts/autoscaler.md) + - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md) + +## How Compute Autoscaling Works + +The following diagram shows how KubeDB Autoscaler operator autoscales the resources of `PgBouncer`. Open the image in a new tab to see the enlarged version. + +
+  [Image: Compute Auto Scaling process of PgBouncer]
+  Fig: Compute Auto Scaling process of PgBouncer
+
+
+The Auto Scaling process consists of the following steps:
+
+1. At first, a user creates a `PgBouncer` Custom Resource Object (CRO).
+
+2. `KubeDB` Provisioner operator watches the `PgBouncer` CRO.
+
+3. When the operator finds a `PgBouncer` CRO, it creates a `PetSet` and related necessary resources like secrets, services, etc.
+
+4. Then, in order to set up autoscaling of `PgBouncer`, the user creates a `PgBouncerAutoscaler` CRO with the desired configuration.
+
+5. `KubeDB` Autoscaler operator watches the `PgBouncerAutoscaler` CRO.
+
+6. `KubeDB` Autoscaler operator generates recommendations using a modified version of the Kubernetes [official recommender](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler/pkg/recommender) for different components of the database, as specified in the `PgBouncerAutoscaler` CRO.
+
+7. If the generated recommendation doesn't match the current resources of the database, then `KubeDB` Autoscaler operator creates a `PgBouncerOpsRequest` CRO to scale the pgbouncer to match the recommendation generated.
+
+8. `KubeDB` Ops-manager operator watches the `PgBouncerOpsRequest` CRO.
+
+9. Then the `KubeDB` Ops-manager operator will scale the pgbouncer vertically as specified in the `PgBouncerOpsRequest` CRO.
+
+In the next docs, we are going to show a step-by-step guide on Autoscaling of PgBouncer using the `PgBouncerAutoscaler` CRD.
diff --git a/docs/guides/pgbouncer/concepts/appbinding.md b/docs/guides/pgbouncer/concepts/appbinding.md
index 1e1fdb8c49..170a60250f 100644
--- a/docs/guides/pgbouncer/concepts/appbinding.md
+++ b/docs/guides/pgbouncer/concepts/appbinding.md
@@ -20,8 +20,6 @@ An `AppBinding` is a Kubernetes `CustomResourceDefinition`(CRD) which points to
 
 If you deploy a database using [KubeDB](https://kubedb.com/docs/latest/welcome/), `AppBinding` object will be created automatically for it. Otherwise, you have to create an `AppBinding` object manually pointing to your desired database.
 
-KubeDB uses [Stash](https://appscode.com/products/stash/) to perform backup/recovery of databases. Stash needs to know how to connect with a target database and the credentials necessary to access it. This is done via an `AppBinding`.
-
 ## AppBinding CRD Specification
 
 Like any official Kubernetes resource, an `AppBinding` has `TypeMeta`, `ObjectMeta` and `Spec` sections. However, unlike other Kubernetes resources, it does not have a `Status` section.
@@ -71,7 +69,7 @@ An `AppBinding` object has the following fields in the `spec` section:
 
 #### spec.type
 
-`spec.type` is an optional field that indicates the type of the app that this `AppBinding` is pointing to. Stash uses this field to resolve the values of `TARGET_APP_TYPE`, `TARGET_APP_GROUP` and `TARGET_APP_RESOURCE` variables of [BackupBlueprint](https://appscode.com/products/stash/latest/concepts/crds/backupblueprint/) object.
+`spec.type` is an optional field that indicates the type of the app that this `AppBinding` is pointing to. PgBouncer operator uses this field to recognise the desired postgres database.
 
 This field follows the following format: `<app group>/<app resource>`. The above AppBinding is pointing to a `postgres` resource under `kubedb.com` group.
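+
+To make `spec.type` concrete, here is a minimal sketch of a manually created AppBinding for an externally managed Postgres. The service and secret names are assumptions for illustration and must match your actual deployment:
+
+```yaml
+apiVersion: appcatalog.appscode.com/v1alpha1
+kind: AppBinding
+metadata:
+  name: ha-postgres             # referenced by PgBouncer's spec.database.databaseRef
+  namespace: demo
+spec:
+  type: kubedb.com/postgres     # <app group>/<app resource>
+  clientConfig:
+    service:
+      name: ha-postgres         # assumed Service fronting the Postgres primary
+      port: 5432
+      scheme: postgresql
+  secret:
+    name: ha-postgres-auth      # assumed Secret with username/password keys
+```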
diff --git a/docs/guides/pgbouncer/concepts/autoscaler.md b/docs/guides/pgbouncer/concepts/autoscaler.md
new file mode 100644
index 0000000000..6a4f144e37
--- /dev/null
+++ b/docs/guides/pgbouncer/concepts/autoscaler.md
@@ -0,0 +1,73 @@
+---
+title: PgBouncerAutoscaler CRD
+menu:
+  docs_{{ .version }}:
+    identifier: pb-autoscaler-concepts
+    name: PgBouncerAutoscaler
+    parent: pb-concepts-pgbouncer
+    weight: 35
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# PgBouncerAutoscaler
+
+## What is PgBouncerAutoscaler
+
+`PgBouncerAutoscaler` is a Kubernetes `Custom Resource Definition` (CRD). It provides a declarative configuration for autoscaling the compute resources of [PgBouncer](https://www.pgbouncer.org/usage.html) components in a Kubernetes native way.
+
+## PgBouncerAutoscaler CRD Specifications
+
+Like any official Kubernetes resource, a `PgBouncerAutoscaler` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections.
+
+Here, a sample `PgBouncerAutoscaler` CRO for autoscaling pgbouncer is given below:
+
+**Sample `PgBouncerAutoscaler` for pgbouncer:**
+
+```yaml
+apiVersion: autoscaling.kubedb.com/v1alpha1
+kind: PgBouncerAutoscaler
+metadata:
+  name: pgbouncer-auto-scale
+  namespace: demo
+spec:
+  databaseRef:
+    name: pgbouncer-server
+  compute:
+    pgbouncer:
+      trigger: "On"
+      podLifeTimeThreshold: 24h
+      minAllowed:
+        cpu: 250m
+        memory: 350Mi
+      maxAllowed:
+        cpu: 1
+        memory: 1Gi
+      controlledResources: ["cpu", "memory"]
+      containerControlledValues: "RequestsAndLimits"
+      resourceDiffPercentage: 10
+```
+
+Here, we are going to describe the various sections of a `PgBouncerAutoscaler` crd.
+
+A `PgBouncerAutoscaler` object has the following fields in the `spec` section.
+
+### spec.databaseRef
+
+`spec.databaseRef` is a required field that points to the [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) object for which the autoscaling will be performed. This field consists of the following sub-field:
+
+- **spec.databaseRef.name:** specifies the name of the [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) object.
+
+### spec.compute
+
+`spec.compute` specifies the autoscaling configuration for the compute resources i.e. cpu and memory of PgBouncer components. This field consists of the following sub-fields:
+
+- `trigger` indicates if compute autoscaling is enabled for this component of the pgbouncer. If "On" then compute autoscaling is enabled. If "Off" then compute autoscaling is disabled.
+- `minAllowed` specifies the minimal amount of resources that will be recommended; the default is no minimum.
+- `maxAllowed` specifies the maximum amount of resources that will be recommended; the default is no maximum.
+- `controlledResources` specifies which type of compute resources (cpu and memory) are allowed for autoscaling. Allowed values are "cpu" and "memory".
+- `containerControlledValues` specifies which resource values should be controlled. Allowed values are "RequestsAndLimits" and "RequestsOnly".
+- `resourceDiffPercentage` specifies the minimum resource difference between the recommended value and the current value, in percentage. If the difference percentage is greater than this value, then autoscaling will be triggered.
+- `podLifeTimeThreshold` specifies the minimum pod lifetime of at least one of the pods before triggering autoscaling.
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/concepts/opsrequest.md b/docs/guides/pgbouncer/concepts/opsrequest.md
new file mode 100644
index 0000000000..50430e0b38
--- /dev/null
+++ b/docs/guides/pgbouncer/concepts/opsrequest.md
@@ -0,0 +1,263 @@
+---
+title: PgBouncerOpsRequests CRD
+menu:
+  docs_{{ .version }}:
+    identifier: pb-opsrequest-concepts
+    name: PgBouncerOpsRequest
+    parent: pb-concepts-pgbouncer
+    weight: 25
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# PgBouncerOpsRequest
+
+## What is PgBouncerOpsRequest
+
+`PgBouncerOpsRequest` is a Kubernetes `Custom Resource Definition` (CRD). It provides a declarative configuration for [PgBouncer](https://www.pgbouncer.org/usage.html) administrative operations like version update, horizontal scaling, vertical scaling, etc. in a Kubernetes native way.
+
+## PgBouncerOpsRequest CRD Specifications
+
+Like any official Kubernetes resource, a `PgBouncerOpsRequest` has `TypeMeta`, `ObjectMeta`, `Spec` and `Status` sections.
+
+Here, some sample `PgBouncerOpsRequest` CRs for different administrative operations are given below:
+
+**Sample `PgBouncerOpsRequest` for updating version:**
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-version-update
+  namespace: demo
+spec:
+  type: UpdateVersion
+  databaseRef:
+    name: pgbouncer-server
+  updateVersion:
+    targetVersion: 1.18.0
+```
+
+**Sample `PgBouncerOpsRequest` Objects for Horizontal Scaling:**
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-horizontal-scale
+  namespace: demo
+spec:
+  type: HorizontalScaling
+  databaseRef:
+    name: pgbouncer-server
+  horizontalScaling:
+    replicas: 2
+```
+
+**Sample `PgBouncerOpsRequest` Objects for Vertical Scaling:**
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-vertical-scale
+  namespace: demo
+spec:
+  type: VerticalScaling
+  databaseRef:
+    name: pgbouncer-server
+  verticalScaling:
+    pgbouncer:
+      resources:
+        requests:
+          memory: "1200Mi"
+          cpu: "0.7"
+        limits:
+          memory: "1200Mi"
+          cpu: "0.7"
+```
+
+**Sample `PgBouncerOpsRequest` Objects for Reconfiguring:**
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-reconfigure
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: pgbouncer-server
+  configuration:
+    pgbouncer:
+      applyConfig:
+        pgbouncer.ini: |-
+          auth_type = scram-sha-256
+```
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-reconfigure
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: pgbouncer-server
+  configuration:
+    pgbouncer:
+      removeCustomConfig: true
+```
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-reconfigure
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: pgbouncer-server
+  configuration:
+    pgbouncer:
+      configSecret:
+        name: new-custom-config
+```
+
+Here, we are going to describe the various sections of a `PgBouncerOpsRequest` crd.
+
+A `PgBouncerOpsRequest` object has the following fields in the `spec` section.
+
+### spec.databaseRef
+
+`spec.databaseRef` is a required field that points to the [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) object for which the administrative operations will be performed. This field consists of the following sub-field:
+
+- **spec.databaseRef.name:** specifies the name of the [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) object.
+
+### spec.type
+
+`spec.type` specifies the kind of operation that will be applied to the database. Currently, the following types of operations are allowed in `PgBouncerOpsRequest`.
+
+- `UpdateVersion`
+- `HorizontalScaling`
+- `VerticalScaling`
+- `Reconfigure`
+- `Restart`
+
+> You can perform only one type of operation on a single `PgBouncerOpsRequest` CR. For example, if you want to update your database and scale up its replicas, then you have to create two separate `PgBouncerOpsRequest` objects. At first, you have to create a `PgBouncerOpsRequest` for updating. Once it is completed, then you can create another `PgBouncerOpsRequest` for scaling.
+
+> Note: There is an exception to the above statement. It is possible to specify both `spec.configuration` & `spec.verticalScaling` in an OpsRequest of type `VerticalScaling`.
+
+### spec.updateVersion
+
+If you want to update your PgBouncer version, you have to specify the `spec.updateVersion` section that specifies the desired version information. This field consists of the following sub-field:
+
+- `spec.updateVersion.targetVersion` refers to a [PgBouncerVersion](/docs/guides/pgbouncer/concepts/catalog.md) CR that contains the PgBouncer version information to which you want to update.
+
+
+### spec.horizontalScaling
+
+If you want to scale up or scale down your PgBouncer cluster or different components of it, you have to specify the `spec.horizontalScaling` section. This field consists of the following sub-field:
+
+- `spec.horizontalScaling.replicas` indicates the desired number of pods for the PgBouncer cluster after scaling. For example, if your cluster currently has 4 pods, and you want to add 2 additional pods, then you have to specify 6 in the `spec.horizontalScaling.replicas` field. Similarly, if you want to remove one pod from the cluster, you have to specify 3 in the `spec.horizontalScaling.replicas` field.
+
+### spec.verticalScaling
+
+`spec.verticalScaling` is a required field specifying the information of resources like `cpu` and `memory` that will be scaled. This field consists of the following sub-fields:
+
+- `spec.verticalScaling.pgbouncer` indicates the desired resources for the PgBouncer PetSet after scaling.
+- `spec.verticalScaling.exporter` indicates the desired resources for the PgBouncer Exporter after scaling.
+
+It has the below structure:
+
+```yaml
+requests:
+  memory: "200Mi"
+  cpu: "0.1"
+limits:
+  memory: "300Mi"
+  cpu: "0.2"
+```
+
+Here, when you specify the resource request, the scheduler uses this information to decide which node to place the container of the Pod on, and when you specify a resource limit for the container, the `kubelet` enforces those limits so that the running container is not allowed to use more of that resource than the limit you set. You can find more details [here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+
+
+### spec.configuration
+
+If you want to reconfigure your running PgBouncer cluster or different components of it with new custom configuration, you have to specify the `spec.configuration` section. This field consists of the following sub-fields:
+
+- `configSecret` points to a secret in the same namespace of the PgBouncer resource, which contains the new custom configurations. If any configSecret was set before, this secret will replace it.
+- `applyConfig` contains the new custom config as a string, which will be merged with the previous configuration. It is a map that currently supports a single key, namely `pgbouncer.ini`. For example:
+
+```yaml
+  applyConfig:
+    pgbouncer.ini: |-
+      max_client_conn = 30
+```
+
+- `removeCustomConfig` is a boolean field. Set this field to true if you want to remove all the custom configuration from the deployed pgbouncer server.
+
+### spec.timeout
+As we internally retry the ops request steps multiple times, this `timeout` field helps the users to specify a timeout for those steps of the ops request (in seconds).
+If a step doesn't finish within the specified timeout, the ops request will result in failure.
+
+### spec.apply
+This field controls the execution of the opsRequest depending on the database state. It has two supported values: `Always` & `IfReady`.
+Use `IfReady` if you want to process the opsRequest only when the database is Ready, and use `Always` if you want to process the opsRequest irrespective of the database state.
+
+
+### PgBouncerOpsRequest `Status`
+
+`.status` describes the current state and progress of a `PgBouncerOpsRequest` operation. It has the following fields:
+
+### status.phase
+
+`status.phase` indicates the overall phase of the operation for this `PgBouncerOpsRequest`. It can have the following values:
+
+| Phase | Meaning |
+|-------------|------------------------------------------------------------------------------------|
+| Successful | KubeDB has successfully performed the operation requested in the PgBouncerOpsRequest |
+| Progressing | KubeDB has started the execution of the applied PgBouncerOpsRequest |
+| Failed | KubeDB has failed the operation requested in the PgBouncerOpsRequest |
+| Denied | KubeDB has denied the operation requested in the PgBouncerOpsRequest |
+| Skipped | KubeDB has skipped the operation requested in the PgBouncerOpsRequest |
+
+Important: The Ops-manager operator can skip an opsRequest only if its execution has not been started yet and there is a newer opsRequest applied in the cluster. In this case, the `spec.type` of the newer opsRequest has to be the same as that of the skipped one.
+
+### status.observedGeneration
+
+`status.observedGeneration` shows the most recent generation observed by the `PgBouncerOpsRequest` controller.
+
+### status.conditions
+
+`status.conditions` is an array that specifies the conditions of different steps of `PgBouncerOpsRequest` processing. Each condition entry has the following fields:
+
+- `type` specifies the type of the condition. PgBouncerOpsRequest has the following types of conditions:
+
+| Type | Meaning |
+|--------------------------------|---------------------------------------------------------------------------|
+| `Progressing` | Specifies that the operation is now in the progressing state |
+| `Successful` | Specifies such a state that the operation on the database was successful. |
+| `DatabasePauseSucceeded` | Specifies such a state that the database is paused by the operator |
+| `ResumeDatabase` | Specifies such a state that the database is resumed by the operator |
+| `Failed` | Specifies such a state that the operation on the database failed.
| +| `UpdatePetSetResources` | Specifies such a state that the PetSet resources has been updated | +| `UpdatePetSet` | Specifies such a state that the PetSet has been updated | +| `IssueCertificatesSucceeded` | Specifies such a state that the tls certificate issuing is successful | +| `UpdateDatabase` | Specifies such a state that the CR of PgBouncer is updated | + +- The `status` field is a string, with possible values `True`, `False`, and `Unknown`. + - `status` will be `True` if the current transition succeeded. + - `status` will be `False` if the current transition failed. + - `status` will be `Unknown` if the current transition was denied. +- The `message` field is a human-readable message indicating details about the condition. +- The `reason` field is a unique, one-word, CamelCase reason for the condition's last transition. +- The `lastTransitionTime` field provides a timestamp for when the operation last transitioned from one state to another. +- The `observedGeneration` shows the most recent condition transition generation observed by the controller. diff --git a/docs/guides/pgbouncer/concepts/pgbouncer.md b/docs/guides/pgbouncer/concepts/pgbouncer.md index 40802d1f2e..925b4bba12 100644 --- a/docs/guides/pgbouncer/concepts/pgbouncer.md +++ b/docs/guides/pgbouncer/concepts/pgbouncer.md @@ -32,7 +32,7 @@ metadata: namespace: demo spec: version: "1.18.0" - replicas: 2 + replicas: 1 database: syncUsers: true databaseName: "postgres" diff --git a/docs/guides/pgbouncer/custom-versions/setup.md b/docs/guides/pgbouncer/custom-versions/setup.md index 737b2684d3..3519ea56e1 100644 --- a/docs/guides/pgbouncer/custom-versions/setup.md +++ b/docs/guides/pgbouncer/custom-versions/setup.md @@ -21,7 +21,7 @@ PgBouncerVersions are KubeDB crds that define the docker images KubeDB will use If you want to create a custom image of pgbouncer with additional features, the best way is to build on top of the existing kubedb image. ```docker -FROM kubedb/pgbouncer:1.17.0 +FROM kubedb/pgbouncer:1.23.1 ENV SOME_VERSION_VAR 0.9.1 @@ -32,18 +32,18 @@ RUN set -ex \ bash ``` -From there, we would define a PgBouncerVersion that contains this new image. Let's say we tagged it as `myco/pgbouncer:custom-1.17.0`. You can also build exporter image yourself using [pgbouncer_exporter](https://github.com/kubedb/pgbouncer_exporter) repository. +From there, we would define a PgBouncerVersion that contains this new image. Let's say we tagged it as `myco/pgbouncer:custom-1.23.1`. You can also build exporter image yourself using [pgbouncer_exporter](https://github.com/kubedb/pgbouncer_exporter) repository. 
```yaml apiVersion: catalog.kubedb.com/v1alpha1 kind: PgBouncerVersion metadata: - name: "1.17.0" + name: "1.23.1" spec: deprecated: false - version: "1.17.0" + version: "1.23.1" pgBouncer: - image: "myco/pgbouncer:custom-1.17.0" + image: "myco/pgbouncer:custom-1.23.1" exporter: image: "myco/pgbouncer_exporter:v0.1.1" ``` @@ -57,7 +57,7 @@ metadata: name: pgbouncer-server namespace: demo spec: - version: "1.17.0" + version: "1.23.1" replicas: 1 connectionPool: poolMode: session diff --git a/docs/guides/pgbouncer/monitoring/_index.md b/docs/guides/pgbouncer/monitoring/_index.md index dfd3aeaa86..2f5d8efa54 100755 --- a/docs/guides/pgbouncer/monitoring/_index.md +++ b/docs/guides/pgbouncer/monitoring/_index.md @@ -5,6 +5,6 @@ menu: identifier: pb-monitoring-pgbouncer name: Monitoring parent: pb-pgbouncer-guides - weight: 50 + weight: 100 menu_name: docs_{{ .version }} --- diff --git a/docs/guides/pgbouncer/monitoring/overview.md b/docs/guides/pgbouncer/monitoring/overview.md index 6230ab5b42..4f6d5247f8 100644 --- a/docs/guides/pgbouncer/monitoring/overview.md +++ b/docs/guides/pgbouncer/monitoring/overview.md @@ -44,27 +44,26 @@ In order to enable monitoring for a database, you have to configure `spec.monito ## Sample Configuration -A sample YAML for Redis crd with `spec.monitor` section configured to enable monitoring with [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) is shown below. +A sample YAML for PgBouncer crd with `spec.monitor` section configured to enable monitoring with [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) is shown below. ```yaml apiVersion: kubedb.com/v1 -kind: Redis +kind: PgBouncer metadata: - name: sample-redis - namespace: databases + name: pb + namespace: demo spec: - version: 6.0.20 - deletionPolicy: WipeOut - configSecret: # configure Redis to use password for authentication - name: redis-config - storageType: Durable - storage: - storageClassName: default - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 5Gi + replicas: 2 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "pg" + namespace: demo + connectionPool: + poolMode: session + port: 5432 monitor: agent: prometheus.io/operator prometheus: @@ -72,14 +71,6 @@ spec: labels: release: prometheus exporter: - args: - - --redis.password=$(REDIS_PASSWORD) - env: - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: _name_of_secret_with_redis_password - key: password # key with the password resources: requests: memory: 512Mi @@ -88,19 +79,14 @@ spec: memory: 512Mi cpu: 250m securityContext: - runAsUser: 2000 + runAsUser: 70 allowPrivilegeEscalation: false ``` -Assume that above Redis is configured to use basic authentication. So, exporter image also need to provide password to collect metrics. We have provided it through `spec.monitor.args` field. - -Here, we have specified that we are going to monitor this server using Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in `monitoring` namespace and this `ServiceMonitor` will have `release: prometheus` label. +Here, we have specified that we are going to monitor this server using Prometheus operator through `spec.monitor.agent: prometheus.io/operator`. KubeDB will create a `ServiceMonitor` crd in databases namespace and this `ServiceMonitor` will have `release: prometheus` label. 
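+
+To confirm that the operator actually created the `ServiceMonitor`, you can list ServiceMonitors by that label. This assumes the Prometheus operator CRDs are installed and the PgBouncer above was deployed in the `demo` namespace; the object name is typically derived from the stats service, so it may differ in your cluster:
+
+```bash
+$ kubectl get servicemonitor -n demo -l release=prometheus
+```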
## Next Steps -- Learn how to monitor Elasticsearch database with KubeDB using [builtin-Prometheus](/docs/guides/elasticsearch/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/elasticsearch/monitoring/using-prometheus-operator.md). -- Learn how to monitor PostgreSQL database with KubeDB using [builtin-Prometheus](/docs/guides/postgres/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/postgres/monitoring/using-prometheus-operator.md). -- Learn how to monitor MySQL database with KubeDB using [builtin-Prometheus](/docs/guides/mysql/monitoring/builtin-prometheus/index.md) and using [Prometheus operator](/docs/guides/mysql/monitoring/prometheus-operator/index.md). -- Learn how to monitor MongoDB database with KubeDB using [builtin-Prometheus](/docs/guides/mongodb/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/mongodb/monitoring/using-prometheus-operator.md). -- Learn how to monitor Redis server with KubeDB using [builtin-Prometheus](/docs/guides/redis/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/redis/monitoring/using-prometheus-operator.md). -- Learn how to monitor Memcached server with KubeDB using [builtin-Prometheus](/docs/guides/memcached/monitoring/using-builtin-prometheus.md) and using [Prometheus operator](/docs/guides/memcached/monitoring/using-prometheus-operator.md). +- Learn how to monitor PgBouncer database with KubeDB using [builtin-Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md) +- Learn how to monitor PgBouncer database with KubeDB using [Prometheus operator](/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md). + diff --git a/docs/guides/pgbouncer/monitoring/setup-grafana-dashboard.md b/docs/guides/pgbouncer/monitoring/setup-grafana-dashboard.md deleted file mode 100644 index 911ba4b220..0000000000 --- a/docs/guides/pgbouncer/monitoring/setup-grafana-dashboard.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Monitor PgBouncer using Prometheus Operator -menu: - docs_{{ .version }}: - identifier: pb-setup-grafana-dashboard-monitoring - name: Setup Grafana Dashboard - parent: pb-monitoring-pgbouncer - weight: 25 -menu_name: docs_{{ .version }} -section_menu_id: guides ---- - -> New to KubeDB? Please start [here](/docs/README.md). - -# Visualize PgBouncer Using Grafana Dashboard - -[Grafana](https://github.com/grafana/grafana) is an open source, feature rich metrics dashboard and graph editor for Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB. PgBouncer comes with a Grafana dashboard designed to monitor real-time updates of PgBouncer servers using Prometheus metrics. - -This tutorial will show you how to import our dashboard on Grafana to monitor PgBouncer deployed with KubeDB. - -## Before You Begin - -- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/). - -- To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/pgbouncer/monitoring/overview.md). - -- You need to have monitoring enabled using either [Builtin Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md) or [Prometheus operator](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md). 
- -- To keep everything isolated, we are going to use a separate namespace called `monitoring` to deploy respective monitoring resources. We are going to deploy database in `demo` namespace. - - ```bash - $ kubectl create ns monitoring - namespace/monitoring created - - $ kubectl create ns demo - namespace/demo created - ``` - -> Note: YAML files used in this tutorial are stored in [docs/examples/pgbouncer](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/pgbouncer) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). - -## Deploy Grafana - -After you have made sure that you have a PgBouncer server running with Monitoring enabled, you're ready to deploy your very own Grafana server. If you still have not deployed PgBouncer server with monitoring enabled, then do so using [Builtin Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md) or [Prometheus operator](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md). - -However, if you already have a Grafana server running in your cluster, feel free to skip this part. Otherwise, create one using: - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/grafana.yaml -deployment.apps/grafana created -``` - -Let's get the name of the pod created by this deployment: - -```bash -$ kubectl get pod -n monitoring -l "app=grafana" - -NAME READY STATUS RESTARTS AGE -grafana-7cbd6b6f87-w9dkh 1/1 Running 0 57s -``` - -## View Dashboard - -Now, we have to expose the Grafana pod so that we can access it from a browser. - -```bash -$ kubectl port-forward -n monitoring grafana-7cbd6b6f87-w9dkh 3000 -Forwarding from 127.0.0.1:3000 -> 3000 -Forwarding from [::1]:3000 -> 3000 -``` - -Grafana should now be available on [localhost](http://localhost:3000/). Use default credentials `(username: admin, password: admin)` to login to Grafana Dashboard. - -## Add Data Source - -First, we need to know the name of the service that exposes our prometheus server pods. In this tutorial, we have used a service named `prometheus-operated` that exposes our prometheus metrics on port 9090. - -```bash -$ kubectl get service -n monitoring -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -prometheus-operated ClusterIP 10.111.246.229 9090/TCP 38m -``` - -We will use this service to point Grafana to our desired data source. - -From Home Dashboard, go to [Configuration > Data Sources](http://localhost:3000/datasources), and select `Add data source`. Select `Prometheus` as the `data source type`. - -In the following screen, add `http://prometheus-operated.monitoring.svc:9090` as the data source `URL`, give it a name `PGBOUNCER_PROMETHEUS`, and press the `Save and Test` button. You should get a message confirming that the `Data source is working`. - -

-  Data Target -

- -## Import Dashboard - -Now, go to [http://localhost:3000/dashboard/import](http://localhost:3000/dashboard/import) to import our PgBouncer Dashboard. Put `10945` as the grafana dashboard id. Select `PGBOUNCER_PROMETHEUS` as the data source, and press `import`. You will now be directed to your PgBouncer dashboard. - -

-  Data Target -

- -## Cleaning up - -To cleanup the Kubernetes resources created by this tutorial, run the following commands - -```bash -# cleanup prometheus resources -kubectl delete -n monitoring deployment grafana - -# delete namespace -kubectl delete ns monitoring -``` - -## Next Steps - -- Monitor your PgBouncer with KubeDB using [built-in Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md). -- Monitor your PgBouncer with KubeDB using [Prometheus operator](/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md). -- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md b/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md index f747fc248e..ef1e0bb17a 100644 --- a/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md +++ b/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md @@ -14,7 +14,7 @@ section_menu_id: guides # Monitoring PgBouncer with builtin Prometheus -This tutorial will show you how to monitor PgBouncer using builtin [Prometheus](https://github.com/prometheus/prometheus) scraper. +This tutorial will show you how to monitor PgBouncer database using builtin [Prometheus](https://github.com/prometheus/prometheus) scraper. ## Before You Begin @@ -38,28 +38,31 @@ This tutorial will show you how to monitor PgBouncer using builtin [Prometheus]( > Note: YAML files used in this tutorial are stored in [docs/examples/pgbouncer](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/pgbouncer) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). +## Prepare Postgres +Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed postgres but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use 3 node Postgres cluster named `ha-postgres`. + ## Deploy PgBouncer with Monitoring Enabled -At first, we will need a PgBouncer with monitoring enabled. This PgBouncer needs to be connected to PostgreSQL database(s). You can get a PgBouncer setup with active connection(s) to PostgreSQL by following the [quickstart](/docs/guides/pgbouncer/quickstart/quickstart.md) guide. PgBouncer object in that guide didn't come with monitoring. So we are going to enable monitoring in it. Below is the PgBouncer object that contains built-in monitoring: +At first, let's deploy a PgBouncer with monitoring enabled. Below is the PgBouncer object that we are going to create. ```yaml apiVersion: kubedb.com/v1 kind: PgBouncer metadata: - name: pgbouncer-server + name: builtin-prom-pb namespace: demo spec: - version: "1.17.0" replicas: 1 + version: "1.18.0" database: syncUsers: true databaseName: "postgres" databaseRef: - name: "quick-postgres" + name: "ha-postgres" namespace: demo connectionPool: - maxClientConnections: 20 - reservePoolSize: 5 + poolMode: session + port: 5432 monitor: agent: prometheus.io/builtin ``` @@ -68,49 +71,55 @@ Here, - `spec.monitor.agent: prometheus.io/builtin` specifies that we are going to monitor this server using builtin Prometheus scraper. -Let's patch the existing PgBouncer with the crd we have shown above. +Let's create the PgBouncer crd we have shown above. 
```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/builtin-prom-pgbouncer.yaml -pgbouncer.kubedb.com/pgbouncer-server configured +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/builtin-prom-pb.yaml +pgbouncer.kubedb.com/builtin-prom-pb created ``` -PgBouncer should still be in `Running` state. +Now, wait for the database to go into `Running` state. ```bash -$ kubectl get pb -n demo pgbouncer-server -NAME VERSION STATUS AGE -pgbouncer-server 1.17.0 Running 13s +$ kubectl get pb -n demo builtin-prom-pb +NAME TYPE VERSION STATUS AGE +builtin-prom-pb kubedb.com/v1 1.18.0 Ready 65s ``` -KubeDB will create a separate stats service with name `{PgBouncer crd name}-stats` for monitoring purpose. +KubeDB will create a separate stats service with name `{PgBouncer cr name}-stats` for monitoring purpose. ```bash -$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=pgbouncer-server" -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -pgbouncer-server ClusterIP 10.108.152.208 5432/TCP 16m -pgbouncer-server-stats ClusterIP 10.111.194.83 56790/TCP 16m +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=builtin-prom-pb" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +builtin-prom-pb ClusterIP 10.96.210.2 5432/TCP 86s +builtin-prom-pb-pods ClusterIP None 5432/TCP 86s +builtin-prom-pb-stats ClusterIP 10.96.215.193 56790/TCP 74s ``` -Here, `pgbouncer-server-stats` service has been created for monitoring purpose. Let's describe the service. +Here, `builtin-prom-pb-stats` service has been created for monitoring purpose. Let's describe the service. ```bash -$ kubectl describe svc -n demo pgbouncer-server-stats -Name: pgbouncer-server-stats +$ kubectl describe svc -n demo builtin-prom-pb-stats +Name: builtin-prom-pb-stats Namespace: demo -Labels: app.kubernetes.io/name=pgbouncers.kubedb.com - app.kubernetes.io/instance=pgbouncer-server +Labels: app.kubernetes.io/component=connection-pooler + app.kubernetes.io/instance=builtin-prom-pb + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=pgbouncers.kubedb.com kubedb.com/role=stats Annotations: monitoring.appscode.com/agent: prometheus.io/builtin prometheus.io/path: /metrics prometheus.io/port: 56790 prometheus.io/scrape: true -Selector: app.kubernetes.io/name=pgbouncers.kubedb.com,app.kubernetes.io/instance=pgbouncer-server +Selector: app.kubernetes.io/instance=builtin-prom-pb,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=pgbouncers.kubedb.com Type: ClusterIP -IP: 10.110.56.149 -Port: prom-http 56790/TCP -TargetPort: prom-http/TCP -Endpoints: 172.17.0.7:56790 +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.96.215.193 +IPs: 10.96.215.193 +Port: metrics 56790/TCP +TargetPort: metrics/TCP +Endpoints: 10.244.0.28:56790 Session Affinity: None Events: ``` @@ -193,7 +202,7 @@ Let's configure a Prometheus scraping job to collect metrics from this service. If you already have a Prometheus server running, you have to add above scraping job in the `ConfigMap` used to configure the Prometheus server. Then, you have to restart it for the updated configuration to take effect. -> If you don't use a persistent volume for Prometheus storage, you will lose your previously scraped data on restart. +>If you don't use a persistent volume for Prometheus storage, you will lose your previously scraped data on restart. 
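+As an optional sanity check before wiring this into Prometheus, you can port-forward the stats service and fetch the metrics endpoint directly; this is a minimal sketch using the service name and port shown above, + +```bash +# forward the stats port of the stats service to localhost +$ kubectl port-forward -n demo svc/builtin-prom-pb-stats 56790 +Forwarding from 127.0.0.1:56790 -> 56790 + +# then, from a separate terminal, fetch the metrics that Prometheus will scrape +$ curl -s http://localhost:56790/metrics | head +``` + +If the exporter is healthy, this prints PgBouncer metrics in the Prometheus exposition format.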
### Deploy New Prometheus Server @@ -305,15 +314,6 @@ $ kubectl apply -f https://github.com/appscode/third-party-tools/raw/master/moni deployment.apps/prometheus created ``` -**Prometheus Service:** - -We will use a service for the Prometheus server. We can use this to look up metrics from within the cluster as well as outside of the cluster. - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/builtin-prom-service.yaml -service/prometheus-operated created -``` - ### Verify Monitoring Metrics Prometheus server is listening to port `9090`. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. @@ -321,26 +321,26 @@ At first, let's check if the Prometheus pod is in `Running` state. ```bash -kubectl get pod -n monitoring -l=app=prometheus -NAME READY STATUS RESTARTS AGE -prometheus-789c9695fc-7rjzf 1/1 Running 0 27s +$ kubectl get pod -n monitoring -l=app=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-d64b668fb-4khbg 1/1 Running 0 21s ``` -Now, run following command on a separate terminal to forward 9090 port of `prometheus-8568c86d86-95zhn` pod, +Now, run the following command in a separate terminal to forward port 9090 of the `prometheus-d64b668fb-4khbg` pod, ```bash -$ kubectl port-forward -n monitoring svc/prometheus-operated 9090 +$ kubectl port-forward -n monitoring prometheus-d64b668fb-4khbg 9090 Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 ``` -Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090/targets](http://localhost:9090/targets) in your browser. You should see the endpoint of `pgbouncer-server-stats` service as one of the targets. +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090/targets](http://localhost:9090/targets) in your browser. You should see the endpoint of the `builtin-prom-pb-stats` service as one of the targets.

-  Prometheus Target +  Prometheus Target

-Check the labels which confirm that the metrics are coming from `pgbouncer-server` through stats service `pgbouncer-server-stats`. +Check the labels marked with red rectangle. These labels confirm that the metrics are coming from `PgBouncer` database `builtin-prom-pb` through stats service `builtin-prom-pb-stats`. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. @@ -349,19 +349,22 @@ Now, you can view the collected metrics and create a graph from homepage of this To cleanup the Kubernetes resources created by this tutorial, run following commands ```bash -$ kubectl delete -n demo pb/pgbouncer-server +kubectl delete -n demo pb/builtin-prom-pb -$ kubectl delete -n monitoring deployment.apps/prometheus +kubectl delete -n monitoring deployment.apps/prometheus -$ kubectl delete -n monitoring clusterrole.rbac.authorization.k8s.io/prometheus -$ kubectl delete -n monitoring serviceaccount/prometheus -$ kubectl delete -n monitoring clusterrolebinding.rbac.authorization.k8s.io/prometheus +kubectl delete -n monitoring clusterrole.rbac.authorization.k8s.io/prometheus +kubectl delete -n monitoring serviceaccount/prometheus +kubectl delete -n monitoring clusterrolebinding.rbac.authorization.k8s.io/prometheus -$ kubectl delete ns demo -$ kubectl delete ns monitoring +kubectl delete ns demo +kubectl delete ns monitoring ``` ## Next Steps -- Monitor your PgBouncer with KubeDB using [`out-of-the-box` Prometheus operator](/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md). -- Use [private Docker registry](/docs/guides/pgbouncer/private-registry/using-private-registry.md) to deploy PgBouncer with KubeDB. + + +- Monitor your PgBouncer database with KubeDB using [out-of-the-box prometheus-Operator](/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md). +- Detail concepts of [PgBouncer object](/docs/guides/pgbouncer/concepts/pgbouncer.md). +- Detail concepts of [PgBouncerVersion object](/docs/guides/pgbouncer/concepts/catalog.md). - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md b/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md index bc8d5f8fd7..a12813466a 100644 --- a/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md +++ b/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md @@ -12,9 +12,16 @@ section_menu_id: guides > New to KubeDB? Please start [here](/docs/README.md). -# Monitoring PgBouncer using Prometheus operator +# Monitoring PgBouncer Using Prometheus operator -[Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) provides simple and Kubernetes native way to deploy and configure Prometheus server. This tutorial will show you how to use Prometheus operator to monitor PgBouncer deployed with KubeDB. +[Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) provides simple and Kubernetes native way to deploy and configure Prometheus server. This tutorial will show you how to use Prometheus operator to monitor PgBouncer database deployed with KubeDB. + +The following diagram shows how KubeDB Provisioner operator monitor `PgBouncer` using Prometheus Operator. Open the image in a new tab to see the enlarged version. + +
+  Monitoring process of PgBouncer using Prometheus Operator +
Fig: Monitoring process of PgBouncer
+
## Before You Begin @@ -22,104 +29,175 @@ section_menu_id: guides - To learn how Prometheus monitoring works with KubeDB in general, please visit [here](/docs/guides/pgbouncer/monitoring/overview.md). -- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy respective monitoring resources. We are going to deploy database in `demo` namespace. +- We need a [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) instance running. If you don't already have a running instance, you can deploy one using this helm chart [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). + +- To keep Prometheus resources isolated, we are going to use a separate namespace called `monitoring` to deploy the prometheus operator helm chart. We are going to deploy database in `demo` namespace. ```bash $ kubectl create ns monitoring namespace/monitoring created + + $ kubectl create ns demo + namespace/demo created ``` -- We need a [Prometheus operator](https://github.com/prometheus-operator/prometheus-operator) instance running. If you don't already have a running instance, deploy one following the docs from [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/operator/README.md). -- If you already don't have a Prometheus server running, deploy one following tutorial from [here](https://github.com/appscode/third-party-tools/blob/master/monitoring/prometheus/operator/README.md#deploy-prometheus-server). > Note: YAML files used in this tutorial are stored in [docs/examples/pgbouncer](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/pgbouncer) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs). ## Find out required labels for ServiceMonitor -We need to know the labels used to select `ServiceMonitor` by a `Prometheus` crd. We are going to provide these labels in `spec.monitor.prometheus.labels` field of PgBouncer crd so that KubeDB creates `ServiceMonitor` object accordingly. +We need to know the labels used to select `ServiceMonitor` by a `Prometheus` crd. We are going to provide these labels in `spec.monitor.prometheus.serviceMonitor.labels` field of PgBouncer crd so that KubeDB creates `ServiceMonitor` object accordingly. -As a prerequisite, we need to have Prometheus operator running, and a prometheus server created to monitor PgBouncer exporter. In this tutorial we are going to use a prometheus server named `promethus` in `monitoring` namespace. You can use the following to install `Prometheus operator`. +At first, let's find out the available Prometheus server in our cluster. ```bash -$ kubectl apply -f https://raw.githubusercontent.com/appscode/third-party-tools/master/monitoring/prometheus/coreos-operator/artifacts/operator.yaml -``` - -Now, get a prometheus server up and running. - -```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/coreos-prom-server.yaml - -clusterrole.rbac.authorization.k8s.io/prometheus created -serviceaccount/prometheus created -clusterrolebinding.rbac.authorization.k8s.io/prometheus created -prometheus.monitoring.coreos.com/prometheus created +$ kubectl get prometheus --all-namespaces +NAMESPACE NAME VERSION REPLICAS AGE +monitoring prometheus-kube-prometheus-prometheus v2.39.0 1 13d ``` -Now, let's find out the available Prometheus server in our cluster. 
+> If you don't have any Prometheus server running in your cluster, deploy one following the guide specified in **Before You Begin** section. +Now, let's view the YAML of the available Prometheus server `prometheus` in `monitoring` namespace. ```bash - -$ kubectl get prometheus --all-namespaces -NAMESPACE NAME AGE -default tufted-rodent-prometheus-o-prometheus 3h42m -monitoring prometheus 18m +$ kubectl get prometheus -n monitoring prometheus-kube-prometheus-prometheus -o yaml ``` - -Now, let's view the YAML of the available Prometheus server `prometheus` in `monitoring` namespace. - ```yaml -$ kubectl get prometheus -n monitoring prometheus -o yaml apiVersion: monitoring.coreos.com/v1 kind: Prometheus metadata: annotations: - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"monitoring.coreos.com/v1","kind":"Prometheus","metadata":{"annotations":{},"labels":{"prometheus":"prometheus"},"name":"prometheus","namespace":"monitoring"},"spec":{"replicas":1,"resources":{"requests":{"memory":"400Mi"}},"serviceAccountName":"prometheus","serviceMonitorSelector":{"matchLabels":{"release":"prometheus"}}}} - creationTimestamp: "2019-09-19T09:32:12Z" + meta.helm.sh/release-name: prometheus + meta.helm.sh/release-namespace: monitoring + creationTimestamp: "2024-07-15T09:54:08Z" generation: 1 labels: - prometheus: prometheus - name: prometheus + app: kube-prometheus-stack-prometheus + app.kubernetes.io/instance: prometheus + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/part-of: kube-prometheus-stack + app.kubernetes.io/version: 61.2.0 + chart: kube-prometheus-stack-61.2.0 + heritage: Helm + release: prometheus + name: prometheus-kube-prometheus-prometheus namespace: monitoring - resourceVersion: "38348" - selfLink: /apis/monitoring.coreos.com/v1/namespaces/monitoring/prometheuses/prometheus - uid: f9285974-3349-40e8-815a-8f50c3a8a4f5 + resourceVersion: "83770" + uid: 7144c771-beff-4285-b7a4-bc105c408bd2 spec: + alerting: + alertmanagers: + - apiVersion: v2 + name: prometheus-kube-prometheus-alertmanager + namespace: monitoring + pathPrefix: / + port: http-web + automountServiceAccountToken: true + enableAdminAPI: false + evaluationInterval: 30s + externalUrl: http://prometheus-kube-prometheus-prometheus.monitoring:9090 + hostNetwork: false + image: quay.io/prometheus/prometheus:v2.53.0 + listenLocal: false + logFormat: logfmt + logLevel: info + paused: false + podMonitorNamespaceSelector: {} + podMonitorSelector: + matchLabels: + release: prometheus + portName: http-web + probeNamespaceSelector: {} + probeSelector: + matchLabels: + release: prometheus replicas: 1 - resources: - requests: - memory: 400Mi - serviceAccountName: prometheus + retention: 10d + routePrefix: / + ruleNamespaceSelector: {} + ruleSelector: + matchLabels: + release: prometheus + scrapeConfigNamespaceSelector: {} + scrapeConfigSelector: + matchLabels: + release: prometheus + scrapeInterval: 30s + securityContext: + fsGroup: 2000 + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + serviceAccountName: prometheus-kube-prometheus-prometheus + serviceMonitorNamespaceSelector: {} serviceMonitorSelector: matchLabels: release: prometheus + shards: 1 + tsdb: + outOfOrderTimeWindow: 0s + version: v2.53.0 + walCompression: true +status: + availableReplicas: 1 + conditions: + - lastTransitionTime: "2024-07-15T09:56:09Z" + message: "" + observedGeneration: 1 + reason: "" + status: "True" + type: Available + - lastTransitionTime: "2024-07-15T09:56:09Z" + message: "" + 
observedGeneration: 1 + reason: "" + status: "True" + type: Reconciled + paused: false + replicas: 1 + selector: app.kubernetes.io/instance=prometheus-kube-prometheus-prometheus,app.kubernetes.io/managed-by=prometheus-operator,app.kubernetes.io/name=prometheus,operator.prometheus.io/name=prometheus-kube-prometheus-prometheus,prometheus=prometheus-kube-prometheus-prometheus + shardStatuses: + - availableReplicas: 1 + replicas: 1 + shardID: "0" + unavailableReplicas: 0 + updatedReplicas: 1 + shards: 1 + unavailableReplicas: 0 + updatedReplicas: 1 ``` -Notice the `spec.serviceMonitorSelector` section. Here, `release: prometheus` label is used to select `ServiceMonitor` crd. So, we are going to use this label in `spec.monitor.prometheus.labels` field of PgBouncer crd. +Notice the `spec.serviceMonitorSelector` section. Here, `release: prometheus` label is used to select `ServiceMonitor` crd. So, we are going to use this label in `spec.monitor.prometheus.serviceMonitor.labels` field of PgBouncer crd. + +## Prepare Postgres +Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed postgres but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use 3 node Postgres cluster named `ha-postgres`. ## Deploy PgBouncer with Monitoring Enabled -We will need a PgBouncer with monitoring enabled. This PgBouncer needs to be connected to PostgreSQL database(s). You can get a PgBouncer setup with active connection(s) to PostgreSQL by following the [quickstart](/docs/guides/pgbouncer/quickstart/quickstart.md) guide. PgBouncer object in that guide didn't come with monitoring. So we are going to enable monitoring in it. Below is the PgBouncer object that contains Prometheus operator based monitoring: +At first, let's deploy an PgBouncer database with monitoring enabled. Below is the PgBouncer object that we are going to create. ```yaml apiVersion: kubedb.com/v1 kind: PgBouncer metadata: - name: pgbouncer-server + name: coreos-prom-pb namespace: demo spec: - version: "1.17.0" replicas: 1 + version: "1.18.0" database: syncUsers: true databaseName: "postgres" databaseRef: - name: "quick-postgres" + name: "ha-postgres" namespace: demo connectionPool: - maxClientConnections: 20 - reservePoolSize: 5 + poolMode: session + port: 5432 + deletionPolicy: WipeOut monitor: agent: prometheus.io/operator prometheus: @@ -132,155 +210,162 @@ spec: Here, - `monitor.agent: prometheus.io/operator` indicates that we are going to monitor this server using Prometheus operator. -- `monitor.prometheus.namespace: monitoring` specifies that KubeDB should create `ServiceMonitor` in `monitoring` namespace. - -- `monitor.prometheus.labels` specifies that KubeDB should create `ServiceMonitor` with these labels. - +- `monitor.prometheus.serviceMonitor.labels` specifies that KubeDB should create `ServiceMonitor` with these labels. - `monitor.prometheus.interval` indicates that the Prometheus server should scrape metrics from this database with 10 seconds interval. 
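+As a quick sanity check before creating the object, you can confirm that this label matches what your Prometheus instance actually selects on by printing only the `serviceMonitorSelector` labels of the Prometheus CR we inspected earlier, + +```bash +# print the labels this Prometheus instance uses to select ServiceMonitor objects +$ kubectl get prometheus -n monitoring prometheus-kube-prometheus-prometheus -o jsonpath='{.spec.serviceMonitorSelector.matchLabels}' +``` + +The output should contain the `release: prometheus` pair that we are putting in `spec.monitor.prometheus.serviceMonitor.labels`.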
Let's create the PgBouncer object that we have shown above, ```bash -$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/coreos-prom-pgbouncer.yaml -pgbouncer.kubedb.com/pgbouncer-server configured +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/monitoring/coreos-prom-pb.yaml +pgbouncer.kubedb.com/coreos-prom-pb created ``` Now, wait for the database to go into `Running` state. ```bash -$ kubectl get pb -n demo pgbouncer-server -NAME VERSION STATUS AGE -pgbouncer-server 1.17.0 Running 10s +$ kubectl get pb -n demo coreos-prom-pb +NAME TYPE VERSION STATUS AGE +coreos-prom-pb kubedb.com/v1 1.18.0 Ready 65s ``` KubeDB will create a separate stats service with name `{PgBouncer crd name}-stats` for monitoring purpose. ```bash -$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=pgbouncer-server" -NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE -pgbouncer-server ClusterIP 10.104.83.201 5432/TCP 52s -pgbouncer-server-stats ClusterIP 10.101.214.117 56790/TCP 50s +$ kubectl get svc -n demo --selector="app.kubernetes.io/instance=coreos-prom-pb" +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +coreos-prom-pb ClusterIP 10.96.201.180 9999/TCP,9595/TCP 4m3s +coreos-prom-pb-pods ClusterIP None 9999/TCP 4m3s +coreos-prom-pb-stats ClusterIP 10.96.73.22 9719/TCP 4m3s ``` -Here, `pgbouncer-server-stats` service has been created for monitoring purpose. +Here, `coreos-prom-pb-stats` service has been created for monitoring purpose. Let's describe this stats service. +```bash +$ kubectl describe svc -n demo coreos-prom-pb-stats +``` ```yaml -$ kubectl describe svc -n demo pgbouncer-server-stats -Name: pgbouncer-server-stats +Name: coreos-prom-pb-stats Namespace: demo -Labels: app.kubernetes.io/name=pgbouncers.kubedb.com - app.kubernetes.io/instance=pgbouncer-server - kubedb.com/role=stats -Annotations: monitoring.appscode.com/agent:prometheus.io/operator -Selector: app.kubernetes.io/name=pgbouncers.kubedb.com,app.kubernetes.io/instance=pgbouncer-server +Labels: app.kubernetes.io/component=connection-pooler + app.kubernetes.io/instance=coreos-prom-pb + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=pgbouncers.kubedb.com +Annotations: monitoring.appscode.com/agent: prometheus.io/operator +Selector: app.kubernetes.io/instance=coreos-prom-pb,app.kubernetes.io/managed-by=kubedb.com,app.kubernetes.io/name=pgbouncers.kubedb.com Type: ClusterIP -IP: 10.101.214.117 -Port: prom-http 56790/TCP -TargetPort: prom-http/TCP -Endpoints: 172.17.0.7:56790 +IP Family Policy: SingleStack +IP Families: IPv4 +IP: 10.96.73.22 +IPs: 10.96.73.22 +Port: metrics 9719/TCP +TargetPort: metrics/TCP +Endpoints: 10.244.0.26:9719 Session Affinity: None +Events: ``` -Notice the `Labels` and `Port` fields. `ServiceMonitor` will use these information to target its endpoints. +Notice the `Labels` and `Port` fields. `ServiceMonitor` will use this information to target its endpoints. -KubeDB will also create a `ServiceMonitor` crd in `monitoring` namespace that select the endpoints of `pgbouncer-server-stats` service. Verify that the `ServiceMonitor` crd has been created. +KubeDB will also create a `ServiceMonitor` crd in `demo` namespace that select the endpoints of `coreos-prom-pb-stats` service. Verify that the `ServiceMonitor` crd has been created. 
```bash -$ kubectl get servicemonitor -n monitoring -NAME AGE -kubedb-demo-pgbouncer-server 3m4s +$ kubectl get servicemonitor -n demo +NAME AGE +coreos-prom-pb-stats 2m40s ``` Let's verify that the `ServiceMonitor` has the label that we had specified in `spec.monitor` section of PgBouncer crd. +```bash +$ kubectl get servicemonitor -n demo coreos-prom-pb-stats -o yaml +``` ```yaml -$ kubectl get servicemonitor -n monitoring kubedb-demo-pgbouncer-server -o yaml apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - creationTimestamp: "2019-09-19T10:03:24Z" + creationTimestamp: "2024-07-15T10:42:35Z" generation: 1 labels: + app.kubernetes.io/component: connection-pooler + app.kubernetes.io/instance: coreos-prom-pb + app.kubernetes.io/managed-by: kubedb.com + app.kubernetes.io/name: pgbouncers.kubedb.com release: prometheus - monitoring.appscode.com/service: pgbouncer-server-stats.demo - name: kubedb-demo-pgbouncer-server - namespace: monitoring + name: coreos-prom-pb-stats + namespace: demo ownerReferences: - - apiVersion: v1 - blockOwnerDeletion: true - kind: Service - name: pgbouncer-server-stats - uid: 749bc2ed-e14c-4a9e-9688-9d319af2b902 - resourceVersion: "41639" - selfLink: /apis/monitoring.coreos.com/v1/namespaces/monitoring/servicemonitors/kubedb-demo-pgbouncer-server - uid: 4a68d942-a003-4b47-a8cb-f20e526e9748 + - apiVersion: v1 + blockOwnerDeletion: true + controller: true + kind: Service + name: coreos-prom-pb-stats + uid: 844d49bc-dfe4-4ab7-a2dc-b5ec43c3b63e + resourceVersion: "87651" + uid: a7b859d8-306e-4061-9f70-4b57c4b784f7 spec: endpoints: - - honorLabels: true - interval: 5s - path: /metrics - port: prom-http + - honorLabels: true + interval: 10s + path: /metrics + port: metrics namespaceSelector: matchNames: - - demo + - demo selector: matchLabels: + app.kubernetes.io/component: connection-pooler + app.kubernetes.io/instance: coreos-prom-pb + app.kubernetes.io/managed-by: kubedb.com app.kubernetes.io/name: pgbouncers.kubedb.com - app.kubernetes.io/instance: pgbouncer-server - kubedb.com/role: stats ``` Notice that the `ServiceMonitor` has label `release: prometheus` that we had specified in PgBouncer crd. -Also notice that the `ServiceMonitor` has selector which match the labels we have seen in the `pgbouncer-server-stats` service. It also, target the `prom-http` port that we have seen in the stats service. +Also notice that the `ServiceMonitor` has selector which match the labels we have seen in the `coreos-prom-pb-stats` service. It also, target the `metrics` port that we have seen in the stats service. ## Verify Monitoring Metrics At first, let's find out the respective Prometheus pod for `prometheus` Prometheus server. ```bash -$ kubectl get pod -n monitoring -l=app=prometheus -NAME READY STATUS RESTARTS AGE -prometheus-prometheus-0 3/3 Running 1 35m +$ kubectl get pod -n monitoring -l=app.kubernetes.io/name=prometheus +NAME READY STATUS RESTARTS AGE +prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 1 13d ``` -Prometheus server is listening to port `9090` of `prometheus-prometheus-0` pod. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. +Prometheus server is listening to port `9090` of `prometheus-prometheus-kube-prometheus-prometheus-0` pod. We are going to use [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/) to access Prometheus dashboard. 
-Run following command on a separate terminal to forward the port 9090 of `prometheus-prometheus-0` pod, +Run the following command in a separate terminal to forward port 9090 of the `prometheus-prometheus-kube-prometheus-prometheus-0` pod, ```bash -$ kubectl port-forward -n monitoring prometheus-prometheus-0 9090 +$ kubectl port-forward -n monitoring prometheus-prometheus-kube-prometheus-prometheus-0 9090 Forwarding from 127.0.0.1:9090 -> 9090 Forwarding from [::1]:9090 -> 9090 ``` -Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090/targets](http://localhost:9090/targets) in your browser. You should see `prom-http` endpoint of `pgbouncer-server-stats` service as one of the targets. +Now, we can access the dashboard at `localhost:9090`. Open [http://localhost:9090/targets](http://localhost:9090/targets) in your browser. You should see the `metrics` endpoint of the `coreos-prom-pb-stats` service as one of the targets.

  Prometheus Target

-Check the `endpoint` and `service` labels which verify that the target is our expected database. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create beautiful dashboard with collected metrics. +Check the `endpoint` and `service` labels marked by the red rectangles. It verifies that the target is our expected database. Now, you can view the collected metrics and create a graph from homepage of this Prometheus dashboard. You can also use this Prometheus server as data source for [Grafana](https://grafana.com/) and create a beautiful dashboard with collected metrics. ## Cleaning up -To cleanup the Kubernetes resources created by this tutorial, run the following commands +To clean up the Kubernetes resources created by this tutorial, run following commands ```bash -# cleanup prometheus resources -kubectl delete -n monitoring prometheus prometheus -kubectl delete -n monitoring clusterrolebinding prometheus -kubectl delete -n monitoring clusterrole prometheus -kubectl delete -n monitoring serviceaccount prometheus -kubectl delete -n monitoring service prometheus-operated - -# delete namespace -kubectl delete ns monitoring +kubectl delete -n demo pb/coreos-prom-pb +kubectl delete -n demo pg/ha-postgres +kubectl delete ns demo ``` ## Next Steps -- Monitor your PgBouncer with KubeDB using [built-in Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md). +- Monitor your PgBouncer database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md). +- Detail concepts of [PgBouncer object](/docs/guides/pgbouncer/concepts/pgbouncer.md). +- Detail concepts of [PgBouncerVersion object](/docs/guides/pgbouncer/concepts/catalog.md). - Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md). diff --git a/docs/guides/pgbouncer/quickstart/quickstart.md b/docs/guides/pgbouncer/quickstart/quickstart.md index 5a366ced90..187e9ad530 100644 --- a/docs/guides/pgbouncer/quickstart/quickstart.md +++ b/docs/guides/pgbouncer/quickstart/quickstart.md @@ -47,6 +47,7 @@ $ kubectl get pgbouncerversions NAME VERSION PGBOUNCER_IMAGE DEPRECATED AGE 1.17.0 1.17.0 ghcr.io/kubedb/pgbouncer:1.17.0 22h 1.18.0 1.18.0 ghcr.io/kubedb/pgbouncer:1.18.0 22h + 1.23.1 1.23.1 ghcr.io/kubedb/pgbouncer:1.23.1 22h ``` @@ -156,7 +157,7 @@ KubeDB implements a PgBouncer crd to define the specifications of a PgBouncer. Below is the PgBouncer object created in this tutorial. -`Note`: If your `KubeDB version` is less or equal to `v2024.6.4`, You have to use `v1alpha2` apiVersion. +`Note`: If your `KubeDB version` is less or equal to `v2024.6.4`, You have to use `v1` apiVersion. ```yaml apiVersion: kubedb.com/v1 @@ -208,7 +209,7 @@ spec: ``` ```bash -$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/quickstart/pgbouncer-server-v1alpha2.yaml +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/quickstart/pgbouncer-server-v1.yaml pgbouncer.kubedb.com/pgbouncer-server created ``` @@ -258,9 +259,8 @@ Following table show what KubeDB does when you delete Postgres crd for different | Behavior | DoNotTerminate | Delete | WipeOut | |---------------------------| :------------: | :------: | :------: | | 1. Block Delete operation | ✓ | ✗ | ✗ | -| 2. Delete PetSet | ✗ | ✓ | ✓ | +| 2. 
Delete PetSet | ✗ | ✓ | ✓ | | 3. Delete Services | ✗ | ✓ | ✓ | -| 4. Delete PVCs | ✗ | ✓ | ✓ | | 5. Delete Secrets | ✗ | ✗ | ✓ | diff --git a/docs/guides/pgbouncer/reconfigure/_index.md b/docs/guides/pgbouncer/reconfigure/_index.md new file mode 100644 index 0000000000..9f613fd9a8 --- /dev/null +++ b/docs/guides/pgbouncer/reconfigure/_index.md @@ -0,0 +1,10 @@ +--- +title: Reconfigure +menu: + docs_{{ .version }}: + identifier: pb-reconfigure + name: Reconfigure + parent: pb-pgbouncer-guides + weight: 48 +menu_name: docs_{{ .version }} +--- \ No newline at end of file diff --git a/docs/guides/pgbouncer/reconfigure/overview.md b/docs/guides/pgbouncer/reconfigure/overview.md new file mode 100644 index 0000000000..d926ecaeb5 --- /dev/null +++ b/docs/guides/pgbouncer/reconfigure/overview.md @@ -0,0 +1,54 @@ +--- +title: Reconfiguring PgBouncer +menu: + docs_{{ .version }}: + identifier: pb-reconfigure-overview + name: Overview + parent: pb-reconfigure + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfiguring PgBouncer + +This guide will give an overview on how KubeDB Ops-manager operator reconfigures `PgBouncer`. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) + - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md) + +## How Reconfiguring PgBouncer Process Works + +The following diagram shows how KubeDB Ops-manager operator reconfigures `PgBouncer`. Open the image in a new tab to see the enlarged version. + +
+  Reconfiguring process of PgBouncer +
Fig: Reconfiguring process of PgBouncer
+
+ +The Reconfiguring PgBouncer process consists of the following steps: + +1. At first, a user creates a `PgBouncer` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `PgBouncer` CR. + +3. When the operator finds a `PgBouncer` CR, it creates `PetSet` and related necessary stuff like secrets, services, etc. + +4. Then, in order to reconfigure of the `PgBouncer`, the user creates a `PgBouncerOpsRequest` CR with desired information. + +5. `KubeDB` Ops-manager operator watches the `PgBouncerOpsRequest` CR. + +6. When it finds a `PgBouncerOpsRequest` CR, it pauses the `PgBouncer` object which is referred from the `PgBouncerOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `PgBouncer` object during the reconfiguring process. + +7. Then the `KubeDB` Ops-manager operator will replace the existing configuration with the new configuration provided or merge the new configuration with the existing configuration according to the `PgBouncerOpsRequest` CR. + +8. Then the `KubeDB` Ops-manager operator will perform reload operation in each Pod so that the desired configuration will replace the old configuration. + +9. After the successful reconfiguring of the `PgBouncer`, the `KubeDB` Ops-manager operator resumes the `PgBouncer` object so that the `KubeDB` Provisioner operator resumes its usual operations. + +In the next docs, we are going to show a step-by-step guide on reconfiguring PgBouncer database components using `PgBouncerOpsRequest` CRD. \ No newline at end of file diff --git a/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md b/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md new file mode 100644 index 0000000000..4e0e3c4f5d --- /dev/null +++ b/docs/guides/pgbouncer/reconfigure/reconfigure-pgbouncer.md @@ -0,0 +1,563 @@ +--- +title: Reconfigure Standalone PgBouncer Database +menu: + docs_{{ .version }}: + identifier: pb-reconfigure-pgbouncer + name: PgBouncer Reconfigure + parent: pb-reconfigure + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Reconfigure PgBouncer + +This guide will show you how to use `KubeDB` Ops-manager operator to reconfigure a PgBouncer. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md). + +- You should be familiar with the following `KubeDB` concepts: + - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md) + - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md) + - [Reconfigure Overview](/docs/guides/pgbouncer/reconfigure/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +> **Note:** YAML files used in this tutorial are stored in [docs/examples/pgbouncer](/docs/examples/pgbouncer) directory of [kubedb/docs](https://github.com/kubedb/docs) repository. + +### Prepare Postgres +For a PgBouncer surely we will need a Postgres server so, prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed postgres but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. 
In this tutorial we will use 3 node Postgres cluster named `ha-postgres`. + +Now, we are going to deploy a `PgBouncer` using a supported version by `KubeDB` operator. Then we are going to apply `PgBouncerOpsRequest` to reconfigure its configuration. + +### Prepare PgBouncer + +Now, we are going to deploy a `PgBouncer` with version `1.18.0`. + +### Deploy PgBouncer + +At first, we will create `pgbouncer.ini` file containing required configuration settings. + +```ini +$ cat pgbouncer.ini +[pgbouncer] +auth_type = scram-sha-256 +``` +Here, `auth_type` is set to `scram-sha-256`, whereas the default value is `md5`. + +Now, we will create a secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo pb-custom-config --from-file=./pgbouncer.ini +secret/pb-custom-config created +``` + +In this section, we are going to create a PgBouncer object specifying `spec.configSecret` field to apply this custom configuration. Below is the YAML of the `PgBouncer` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pb-custom + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + deletionPolicy: WipeOut +``` + +Let's create the `PgBouncer` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/reconfigure/pb-custom-config.yaml +pgbouncer.kubedb.com/pb-custom created +``` + +Now, wait until `pb-custom` has status `Ready`. i.e, + +```bash +$ kubectl get pb -n demo +NAME TYPE VERSION STATUS AGE +pb-custom kubedb.com/v1 1.18.0 Ready 112s +``` + +Now, we will check if the pgbouncer has started with the custom configuration we have provided. + +Now, you can exec into the pgbouncer pod and find if the custom configuration is there, + +```bash +$ kubectl exec -it -n demo pb-custom-0 -- /bin/sh +pb-custom-0:/$ cat etc/config/pgbouncer.ini +[databases] +postgres= host=ha-postgres.demo.svc port=5432 dbname=postgres + +[pgbouncer] +max_client_conn = 87 +default_pool_size = 2 +min_pool_size = 1 +max_db_connections = 1 +logfile = /tmp/pgbouncer.log +listen_port = 5432 +ignore_startup_parameters = extra_float_digits +pidfile = /tmp/pgbouncer.pid +listen_addr = * +reserve_pool_size = 5 +reserve_pool_timeout = 5 +auth_type = scram-sha-256 +auth_file = /var/run/pgbouncer/secret/userlist +admin_users = pgbouncer +pool_mode = session +max_user_connections = 2 +stats_period = 60 +pb-custom-0:/$ exit +exit +``` + +As we can see from the configuration of running pgbouncer, the value of `auth_type` has been set to `scram-sha-256`. + +### Reconfigure using new secret + +Now we will reconfigure this pgbouncer to set `auth_type` to `md5`. + +Now, we will edit the `pgbouncer.ini` file containing required configuration settings. + +```ini +$ cat pgbouncer.ini +auth_type=md5 +``` + +Then, we will create a new secret with this configuration file. + +```bash +$ kubectl create secret generic -n demo new-custom-config --from-file=./pgbouncer.ini +secret/new-custom-config created +``` + +#### Create PgBouncerOpsRequest + +Now, we will use this secret to replace the previous secret using a `PgBouncerOpsRequest` CR. 
The `PgBouncerOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pbops-reconfigure + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: pb-custom + configuration: + pgbouncer: + configSecret: + name: new-custom-config + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `pb-csutom` pgbouncer. +- `spec.type` specifies that we are performing `Reconfigure` on our pgbouncer. +- `spec.configuration.pgbouncer.configSecret.name` specifies the name of the new secret. +- Have a look [here](/docs/guides/pgbouncer/concepts/opsrequest.md#spectimeout) on the respective sections to understand the `timeout` & `apply` fields. + +Let's create the `PgBouncerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/reconfigure/pbops-reconfigure.yaml +pgbounceropsrequest.ops.kubedb.com/pbops-reconfigure created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will update the `configSecret` of `PgBouncer` object. + +Let's wait for `PgBouncerOpsRequest` to be `Successful`. Run the following command to watch `PgBouncerOpsRequest` CR, + +```bash +$ watch kubectl get pgbounceropsrequest -n demo +Every 2.0s: kubectl get pgbounceropsrequest -n demo +NAME TYPE STATUS AGE +pbops-reconfigure Reconfigure Successful 63s +``` + +We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest` we will get an overview of the steps that were followed to reconfigure the pgbouncer. + +```bash +$ kubectl describe pgbounceropsrequest -n demo pbops-reconfigure +Name: pbops-reconfigure +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgBouncerOpsRequest +Metadata: + Creation Timestamp: 2024-11-28T10:06:23Z + Generation: 1 + Resource Version: 86377 + UID: f96d088e-a32b-40eb-bd9b-ca15a8370548 +Spec: + Apply: IfReady + Configuration: + Pgbouncer: + Config Secret: + Name: new-custom-config + Database Ref: + Name: pb-custom + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-28T10:06:23Z + Message: Controller has started to Progress with Reconfigure of PgBouncerOpsRequest: demo/pbops-reconfigure + Observed Generation: 1 + Reason: Running + Status: True + Type: Running + Last Transition Time: 2024-11-28T10:06:26Z + Message: paused pgbouncer database + Observed Generation: 1 + Reason: Paused + Status: True + Type: Paused + Last Transition Time: 2024-11-28T10:06:36Z + Message: Successfully updated PgBouncer + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-11-28T10:06:36Z + Message: Successfully updated PgBouncer backend secret + Observed Generation: 1 + Reason: UpdateBackendSecret + Status: True + Type: UpdateBackendSecret + Last Transition Time: 2024-11-28T10:06:41Z + Message: get pod; ConditionStatus:True; PodName:pb-custom-0 + Observed Generation: 1 + Status: True + Type: GetPod--pb-custom-0 + Last Transition Time: 2024-11-28T10:07:16Z + Message: volume mount check; ConditionStatus:True; PodName:pb-custom-0 + Observed Generation: 1 + Status: True + Type: VolumeMountCheck--pb-custom-0 + Last Transition Time: 2024-11-28T10:07:21Z + Message: reload config; ConditionStatus:True; PodName:pb-custom-0 + Observed Generation: 1 + Status: True + Type: 
ReloadConfig--pb-custom-0 + Last Transition Time: 2024-11-28T10:07:21Z + Message: Reloading performed successfully in PgBouncer: demo/pb-custom for PgBouncerOpsRequest: pbops-reconfigure + Observed Generation: 1 + Reason: ReloadPodsSucceeded + Status: True + Type: ReloadPods + Last Transition Time: 2024-11-28T10:07:21Z + Message: Successfully Reconfigured + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-28T10:07:21Z + Message: Controller has successfully completed with Reconfigure of PgBouncerOpsRequest: demo/pbops-reconfigure + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 70s KubeDB Ops-manager Operator Start processing for PgBouncerOpsRequest: demo/pbops-reconfigure + Normal Starting 70s KubeDB Ops-manager Operator Pausing PgBouncer databse: demo/pb-custom + Normal Successful 70s KubeDB Ops-manager Operator Successfully paused PgBouncer database: demo/pb-custom for PgBouncerOpsRequest: pbops-reconfigure + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 52s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning volume mount check; ConditionStatus:False; PodName:pb-custom-0 52s KubeDB Ops-manager Operator volume mount check; ConditionStatus:False; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 47s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 42s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 37s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 32s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 27s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 22s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 17s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning volume mount check; ConditionStatus:True; PodName:pb-custom-0 17s KubeDB Ops-manager Operator volume mount check; ConditionStatus:True; PodName:pb-custom-0 + Warning reload config; ConditionStatus:True; PodName:pb-custom-0 12s KubeDB Ops-manager Operator reload config; ConditionStatus:True; PodName:pb-custom-0 + Warning reload config; ConditionStatus:True; PodName:pb-custom-0 12s KubeDB Ops-manager Operator reload config; ConditionStatus:True; PodName:pb-custom-0 + Normal Successful 12s KubeDB Ops-manager Operator Reloading performed successfully in PgBouncer: demo/pb-custom for PgBouncerOpsRequest: pbops-reconfigure + Normal Starting 12s KubeDB Ops-manager Operator Resuming PgBouncer database: demo/pb-custom + Normal Successful 12s KubeDB Ops-manager Operator Successfully resumed PgBouncer database: demo/pb-custom + Normal Successful 12s KubeDB Ops-manager Operator Controller has Successfully Reconfigured PgBouncer databases: demo/pb-custom +``` + +Now let's exec into the pgbouncer pod and check the new configuration we have provided. 
+ +```bash +$ kubectl exec -it -n demo pb-custom-0 -- /bin/sh +pb-custom-0:/$ cat etc/config/pgbouncer.ini +[databases] +postgres= host=ha-postgres.demo.svc port=5432 dbname=postgres + +[pgbouncer] +max_db_connections = 1 +logfile = /tmp/pgbouncer.log +listen_addr = * +admin_users = pgbouncer +pool_mode = session +max_client_conn = 87 +listen_port = 5432 +ignore_startup_parameters = extra_float_digits +auth_file = /var/run/pgbouncer/secret/userlist +default_pool_size = 2 +min_pool_size = 1 +max_user_connections = 2 +stats_period = 60 +auth_type = md5 +pidfile = /tmp/pgbouncer.pid +reserve_pool_size = 5 +reserve_pool_timeout = 5 +pb-custom-0:/$ exit +exit +``` + +As we can see from the configuration of running pgbouncer, the value of `auth_type` has been changed from `scram-sha-256` to `md5`. So the reconfiguration of the pgbouncer is successful. + + +### Reconfigure using apply config + +Now we will reconfigure this pgbouncer again to set `auth_type` to `scram-sha-256`. This time we won't use a new secret. We will use the `applyConfig` field of the `PgBouncerOpsRequest`. This will merge the new config in the existing secret. + +#### Create PgBouncerOpsRequest + +Now, we will use the new configuration in the `data` field in the `PgBouncerOpsRequest` CR. The `PgBouncerOpsRequest` yaml is given below, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pbops-reconfigure-apply + namespace: demo +spec: + type: Reconfigure + databaseRef: + name: pb-custom + configuration: + pgbouncer: + applyConfig: + pgbouncer.ini: |- + [pgbouncer] + auth_type=scram-sha-256 + timeout: 5m + apply: IfReady +``` + +Here, + +- `spec.databaseRef.name` specifies that we are reconfiguring `pb-custom` pgbouncer. +- `spec.type` specifies that we are performing `Reconfigure` on our pgbouncer. +- `spec.configuration.pgbouncer.applyConfig` specifies the new configuration that will be merged in the existing secret. + +Let's create the `PgBouncerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/reconfigure/pbops-reconfigure-apply.yaml +pgbounceropsrequest.ops.kubedb.com/pbops-reconfigure-apply created +``` + +#### Verify the new configuration is working + +If everything goes well, `KubeDB` Ops-manager operator will merge this new config with the existing configuration. + +Let's wait for `PgBouncerOpsRequest` to be `Successful`. Run the following command to watch `PgBouncerOpsRequest` CR, + +```bash +$ watch kubectl get pgbounceropsrequest -n demo +Every 2.0s: kubectl get pgbounceropsrequest -n demo +NAME TYPE STATUS AGE +pbops-reconfigure Reconfigure Successful 9m15s +pbops-reconfigure-apply Reconfigure Successful 53s +``` + +We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest` we will get an overview of the steps that were followed to reconfigure the pgbouncer. 
+ +```bash +$ kubectl describe pgbounceropsrequest -n demo pbops-reconfigure-apply +Name: pbops-reconfigure-apply +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgBouncerOpsRequest +Metadata: + Creation Timestamp: 2024-11-28T10:11:52Z + Generation: 1 + Resource Version: 86774 + UID: a4b8e8b5-0b82-4391-a8fe-66911aa5bee6 +Spec: + Apply: IfReady + Configuration: + Pgbouncer: + Apply Config: + pgbouncer.ini: [pgbouncer] +auth_type=scram-sha-256 + Database Ref: + Name: pb-custom + Timeout: 5m + Type: Reconfigure +Status: + Conditions: + Last Transition Time: 2024-11-28T10:11:52Z + Message: Controller has started to Progress with Reconfigure of PgBouncerOpsRequest: demo/pbops-reconfigure-apply + Observed Generation: 1 + Reason: Running + Status: True + Type: Running + Last Transition Time: 2024-11-28T10:11:55Z + Message: paused pgbouncer database + Observed Generation: 1 + Reason: Paused + Status: True + Type: Paused + Last Transition Time: 2024-11-28T10:11:55Z + Message: Successfully updated PgBouncer + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-11-28T10:11:55Z + Message: Successfully updated PgBouncer backend secret + Observed Generation: 1 + Reason: UpdateBackendSecret + Status: True + Type: UpdateBackendSecret + Last Transition Time: 2024-11-28T10:12:00Z + Message: get pod; ConditionStatus:True; PodName:pb-custom-0 + Observed Generation: 1 + Status: True + Type: GetPod--pb-custom-0 + Last Transition Time: 2024-11-28T10:12:00Z + Message: volume mount check; ConditionStatus:True; PodName:pb-custom-0 + Observed Generation: 1 + Status: True + Type: VolumeMountCheck--pb-custom-0 + Last Transition Time: 2024-11-28T10:12:05Z + Message: reload config; ConditionStatus:True; PodName:pb-custom-0 + Observed Generation: 1 + Status: True + Type: ReloadConfig--pb-custom-0 + Last Transition Time: 2024-11-28T10:12:05Z + Message: Reloading performed successfully in PgBouncer: demo/pb-custom for PgBouncerOpsRequest: pbops-reconfigure-apply + Observed Generation: 1 + Reason: ReloadPodsSucceeded + Status: True + Type: ReloadPods + Last Transition Time: 2024-11-28T10:12:05Z + Message: Successfully Reconfigured + Observed Generation: 1 + Reason: Reconfigure + Status: True + Type: Reconfigure + Last Transition Time: 2024-11-28T10:12:05Z + Message: Controller has successfully completed with Reconfigure of PgBouncerOpsRequest: demo/pbops-reconfigure-apply + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 54s KubeDB Ops-manager Operator Start processing for PgBouncerOpsRequest: demo/pbops-reconfigure-apply + Normal Starting 54s KubeDB Ops-manager Operator Pausing PgBouncer databse: demo/pb-custom + Normal Successful 54s KubeDB Ops-manager Operator Successfully paused PgBouncer database: demo/pb-custom for PgBouncerOpsRequest: pbops-reconfigure-apply + Warning get pod; ConditionStatus:True; PodName:pb-custom-0 46s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-custom-0 + Warning volume mount check; ConditionStatus:True; PodName:pb-custom-0 46s KubeDB Ops-manager Operator volume mount check; ConditionStatus:True; PodName:pb-custom-0 + Warning reload config; ConditionStatus:True; PodName:pb-custom-0 41s KubeDB Ops-manager Operator reload config; ConditionStatus:True; PodName:pb-custom-0 + Warning reload config; 
ConditionStatus:True; PodName:pb-custom-0   41s   KubeDB Ops-manager Operator  reload config; ConditionStatus:True; PodName:pb-custom-0
+  Normal   Successful   41s   KubeDB Ops-manager Operator  Reloading performed successfully in PgBouncer: demo/pb-custom for PgBouncerOpsRequest: pbops-reconfigure-apply
+  Normal   Starting     41s   KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pb-custom
+  Normal   Successful   41s   KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pb-custom
+  Normal   Successful   41s   KubeDB Ops-manager Operator  Controller has Successfully Reconfigured PgBouncer databases: demo/pb-custom
+  Normal   Starting     41s   KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pb-custom
+  Normal   Successful   41s   KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pb-custom
+  Normal   Successful   41s   KubeDB Ops-manager Operator  Controller has Successfully Reconfigured PgBouncer databases: demo/pb-custom
+```
+
+Now let's exec into the pgbouncer pod and check the new configuration we have provided.
+
+```bash
+$ kubectl exec -it -n demo pb-custom-0 -- /bin/sh
+pb-custom-0:/$ cat etc/config/pgbouncer.ini
+[databases]
+postgres= host=ha-postgres.demo.svc port=5432 dbname=postgres
+
+[pgbouncer]
+stats_period = 60
+pidfile = /tmp/pgbouncer.pid
+pool_mode = session
+reserve_pool_timeout = 5
+max_client_conn = 87
+min_pool_size = 1
+default_pool_size = 2
+listen_addr = *
+max_db_connections = 1
+max_user_connections = 2
+auth_type=scram-sha-256
+ignore_startup_parameters = extra_float_digits
+admin_users = pgbouncer
+auth_file = /var/run/pgbouncer/secret/userlist
+logfile = /tmp/pgbouncer.log
+listen_port = 5432
+reserve_pool_size = 5
+pb-custom-0:/$ exit
+exit
+```
+
+As we can see from the configuration of the running pgbouncer, the value of `auth_type` has been changed from `md5` to `scram-sha-256`. So the reconfiguration of the pgbouncer using the `applyConfig` field is successful.
+
+### Remove config
+
+This operation removes all the custom config previously provided. After that, the Ops-manager operator merges any newly given config with the default config and applies the result. The relevant fields of such a `PgBouncerOpsRequest` are:
+
+- `spec.databaseRef.name` specifies that we are reconfiguring `pb-custom` pgbouncer.
+- `spec.type` specifies that we are performing `Reconfigure` on our pgbouncer.
+- `spec.configuration.pgbouncer.removeCustomConfig` is a boolean field; set it to `true` to remove the previously provided custom configuration.
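+
+For example, a `PgBouncerOpsRequest` using this field could look like the following minimal sketch. Note that the manifest name `pbops-remove-config` is illustrative and is not part of the example files shipped with this tutorial,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pbops-remove-config   # illustrative name, not a shipped example
+  namespace: demo
+spec:
+  type: Reconfigure
+  databaseRef:
+    name: pb-custom
+  configuration:
+    pgbouncer:
+      # removes the previously provided custom configuration
+      removeCustomConfig: true
+```
+
+After applying it with `kubectl apply -f <file>.yaml`, the operator reverts the pgbouncer to its default configuration in the same way as the previous reconfigure requests.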
+
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete -n demo pb/pb-custom
+kubectl delete pgbounceropsrequest -n demo pbops-reconfigure pbops-reconfigure-apply
+kubectl delete pg -n demo ha-postgres
+kubectl delete ns demo
+```
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/restart/_index.md b/docs/guides/pgbouncer/restart/_index.md
new file mode 100644
index 0000000000..9b528e5b81
--- /dev/null
+++ b/docs/guides/pgbouncer/restart/_index.md
@@ -0,0 +1,10 @@
+---
+title: Restart PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-restart
+    name: Restart
+    parent: pb-pgbouncer-guides
+    weight: 50
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/pgbouncer/restart/restart.md b/docs/guides/pgbouncer/restart/restart.md
new file mode 100644
index 0000000000..688411ccd4
--- /dev/null
+++ b/docs/guides/pgbouncer/restart/restart.md
@@ -0,0 +1,189 @@
+---
+title: Restart PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-restart-details
+    name: Restart PgBouncer
+    parent: pb-restart
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Restart PgBouncer
+
+KubeDB supports restarting the PgBouncer via a `PgBouncerOpsRequest`. Restarting is useful if some pods get stuck in some phase or are not working correctly. This tutorial will show you how to use that.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- To keep things isolated, this tutorial uses a separate namespace called `demo` throughout.
+
+  ```bash
+  $ kubectl create ns demo
+  namespace/demo created
+  ```
+
+> Note: YAML files used in this tutorial are stored in [docs/examples/pgbouncer](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/pgbouncer) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Prepare Postgres
+
+Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use a 3-node Postgres cluster named `ha-postgres`.
+
+## Deploy PgBouncer
+
+In this section, we are going to deploy a PgBouncer using KubeDB. Below is the YAML of the `PgBouncer` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: PgBouncer
+metadata:
+  name: pgbouncer
+  namespace: demo
+spec:
+  replicas: 1
+  version: "1.18.0"
+  database:
+    syncUsers: true
+    databaseName: "postgres"
+    databaseRef:
+      name: "ha-postgres"
+      namespace: demo
+  connectionPool:
+    poolMode: session
+    port: 5432
+    reservePoolSize: 5
+    maxClientConnections: 87
+    defaultPoolSize: 2
+    minPoolSize: 1
+    authType: md5
+  deletionPolicy: WipeOut
+```
+
+Let's create the `PgBouncer` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/restart/pgbouncer.yaml
+pgbouncer.kubedb.com/pgbouncer created
+```
+
+## Apply Restart OpsRequest
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: restart-pgbouncer
+  namespace: demo
+spec:
+  type: Restart
+  databaseRef:
+    name: pgbouncer
+  timeout: 3m
+  apply: Always
+```
+
+- `spec.type` specifies the type of the ops request.
+- `spec.databaseRef` holds the name of the PgBouncer. The pgbouncer should be available in the same namespace as the opsRequest.
+- The meaning of the `spec.timeout` & `spec.apply` fields can be found [here](/docs/guides/pgbouncer/concepts/opsrequest.md#spectimeout).
+
+Let's create the `PgBouncerOpsRequest` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/restart/ops.yaml
+pgbounceropsrequest.ops.kubedb.com/restart-pgbouncer created
+```
+
+Now the Ops-manager operator will restart the pods one by one.
+
+```shell
+$ kubectl get pbops -n demo
+NAME                TYPE      STATUS       AGE
+restart-pgbouncer   Restart   Successful   79s
+
+$ kubectl get pbops -n demo -oyaml restart-pgbouncer
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  annotations:
+    kubectl.kubernetes.io/last-applied-configuration: |
+      {"apiVersion":"ops.kubedb.com/v1alpha1","kind":"PgBouncerOpsRequest","metadata":{"annotations":{},"name":"restart-pgbouncer","namespace":"demo"},"spec":{"apply":"Always","databaseRef":{"name":"pgbouncer"},"timeout":"3m","type":"Restart"}}
+  creationTimestamp: "2024-07-18T06:49:50Z"
+  generation: 1
+  name: restart-pgbouncer
+  namespace: demo
+  resourceVersion: "94394"
+  uid: 8d3387fc-0c21-4e14-8bed-857a7cdf5423
+spec:
+  apply: Always
+  databaseRef:
+    name: pgbouncer
+  timeout: 3m
+  type: Restart
+status:
+  conditions:
+  - lastTransitionTime: "2024-07-18T06:49:50Z"
+    message: PgBouncer ops-request has started to restart pgbouncer nodes
+    observedGeneration: 1
+    reason: Restart
+    status: "True"
+    type: Restart
+  - lastTransitionTime: "2024-07-18T06:49:53Z"
+    message: Successfully paused database
+    observedGeneration: 1
+    reason: DatabasePauseSucceeded
+    status: "True"
+    type: DatabasePauseSucceeded
+  - lastTransitionTime: "2024-07-18T06:50:38Z"
+    message: Successfully Restarted PgBouncer nodes
+    observedGeneration: 1
+    reason: RestartNodes
+    status: "True"
+    type: RestartNodes
+  - lastTransitionTime: "2024-07-18T06:49:58Z"
+    message: get pod; ConditionStatus:True; PodName:pgbouncer-0
+    observedGeneration: 1
+    status: "True"
+    type: GetPod--pgbouncer-0
+  - lastTransitionTime: "2024-07-18T06:49:58Z"
+    message: evict pod; ConditionStatus:True; PodName:pgbouncer-0
+    observedGeneration: 1
+    status: "True"
+    type: EvictPod--pgbouncer-0
+  - lastTransitionTime: "2024-07-18T06:50:33Z"
+    message: check pod running; ConditionStatus:True; PodName:pgbouncer-0
+    observedGeneration: 1
+    status: "True"
+    type: CheckPodRunning--pgbouncer-0
+  - lastTransitionTime: "2024-07-18T06:50:38Z"
+    message: Controller has successfully restart the PgBouncer replicas
+    observedGeneration: 1
+    reason: Successful
+    status: "True"
+    type: Successful
+  observedGeneration: 1
+  phase: Successful
+```
+
+## Cleaning up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete pgbounceropsrequest -n demo restart-pgbouncer
+kubectl delete pgbouncer -n demo pgbouncer
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Detail concepts of [PgBouncer object](/docs/guides/pgbouncer/concepts/pgbouncer.md).
+- Monitor your PgBouncer database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md).
+- Monitor your PgBouncer database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/pgbouncer/scaling/_index.md b/docs/guides/pgbouncer/scaling/_index.md
new file mode 100644
index 0000000000..dd2566b603
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/_index.md
@@ -0,0 +1,10 @@
+---
+title: Scaling PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-scaling
+    name: Scaling
+    parent: pb-pgbouncer-guides
+    weight: 43
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/scaling/horizontal-scaling/_index.md b/docs/guides/pgbouncer/scaling/horizontal-scaling/_index.md
new file mode 100644
index 0000000000..96af9d6773
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/horizontal-scaling/_index.md
@@ -0,0 +1,10 @@
+---
+title: Horizontal Scaling
+menu:
+  docs_{{ .version }}:
+    identifier: pb-horizontal-scaling
+    name: Horizontal Scaling
+    parent: pb-scaling
+    weight: 10
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/scaling/horizontal-scaling/horizontal-ops.md b/docs/guides/pgbouncer/scaling/horizontal-scaling/horizontal-ops.md
new file mode 100644
index 0000000000..f11927fc8b
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/horizontal-scaling/horizontal-ops.md
@@ -0,0 +1,396 @@
+---
+title: Horizontal Scaling PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-horizontal-scaling-ops
+    name: HorizontalScaling OpsRequest
+    parent: pb-horizontal-scaling
+    weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Horizontal Scale PgBouncer
+
+This guide will show you how to use the `KubeDB` Ops-manager operator to scale the replicas of a PgBouncer.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+  - [Horizontal Scaling Overview](/docs/guides/pgbouncer/scaling/horizontal-scaling/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in [docs/examples/pgbouncer](/docs/examples/pgbouncer) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+## Apply Horizontal Scaling on pgbouncer
+
+Here, we are going to deploy a `PgBouncer` using a version supported by the `KubeDB` operator. Then we are going to apply horizontal scaling on it.
+
+### Prepare Postgres
+
+Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use a 3-node Postgres cluster named `ha-postgres`.
+
+### Prepare PgBouncer
+
+Now, we are going to deploy a `PgBouncer` with version `1.18.0`.
+
+### Deploy PgBouncer
+
+In this section, we are going to deploy a PgBouncer. Then, in the next section we will scale the pgbouncer using `PgBouncerOpsRequest` CRD. Below is the YAML of the `PgBouncer` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: PgBouncer
+metadata:
+  name: pb-horizontal
+  namespace: demo
+spec:
+  replicas: 1
+  version: "1.18.0"
+  database:
+    syncUsers: true
+    databaseName: "postgres"
+    databaseRef:
+      name: "ha-postgres"
+      namespace: demo
+  connectionPool:
+    poolMode: session
+    port: 5432
+    reservePoolSize: 5
+    maxClientConnections: 87
+    defaultPoolSize: 2
+    minPoolSize: 1
+    authType: md5
+  deletionPolicy: WipeOut
+```
+
+Let's create the `PgBouncer` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/scaling/pb-horizontal.yaml
+pgbouncer.kubedb.com/pb-horizontal created
+```
+
+Now, wait until `pb-horizontal` has status `Ready`. i.e,
+
+```bash
+$ kubectl get pb -n demo
+NAME            VERSION   STATUS   AGE
+pb-horizontal   1.18.0    Ready    2m19s
+```
+
+Let's check the number of replicas this pgbouncer has from the PgBouncer object, and the number of pods the PetSet has,
+
+```bash
+$ kubectl get pgbouncer -n demo pb-horizontal -o json | jq '.spec.replicas'
+1
+
+$ kubectl get petset -n demo pb-horizontal -o json | jq '.spec.replicas'
+1
+```
+
+We can see from both commands that the pgbouncer has 1 replica.
+
+We are now ready to apply the `PgBouncerOpsRequest` CR to scale this pgbouncer.
+
+## Scale Up Replicas
+
+Here, we are going to scale up the replicas of the pgbouncer to meet the desired number of replicas.
+
+#### Create PgBouncerOpsRequest
+
+In order to scale up the replicas of the pgbouncer, we have to create a `PgBouncerOpsRequest` CR with our desired replicas.
Below is the YAML of the `PgBouncerOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-horizontal-scale-up + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: pb-horizontal + horizontalScaling: + replicas: 3 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling operation on `pb-horizontal` pgbouncer. +- `spec.type` specifies that we are performing `HorizontalScaling` on our pgbouncer. +- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. + +Let's create the `PgBouncerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/scaling/horizontal-scaling-ops.yaml +pgbounceropsrequest.ops.kubedb.com/pgbouncer-horizontal-scale-up created +``` + +#### Verify replicas scaled up successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `PgBouncer` object and related `PetSet`. + +Let's wait for `PgBouncerOpsRequest` to be `Successful`. Run the following command to watch `PgBouncerOpsRequest` CR, + +```bash +$ watch kubectl get pgbounceropsrequest -n demo +Every 2.0s: kubectl get pgbounceropsrequest -n demo +NAME TYPE STATUS AGE +pgbouncer-horizontal-scale-up HorizontalScaling Successful 2m49s +``` + +We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest` we will get an overview of the steps that were followed to scale the pgbouncer. + +```bash +$ kubectl describe pgbounceropsrequest -n demo pgbouncer-horizontal-scale-up +Name: pgbouncer-horizontal-scale-up +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgBouncerOpsRequest +Metadata: + Creation Timestamp: 2024-11-27T11:12:29Z + Generation: 1 + Resource Version: 49162 + UID: ce390f66-e10f-490f-ad47-f28894d0569a +Spec: + Apply: IfReady + Database Ref: + Name: pb-horizontal + Horizontal Scaling: + Replicas: 3 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-11-27T11:12:29Z + Message: Controller has started to Progress with HorizontalScaling of PgBouncerOpsRequest: demo/pgbouncer-horizontal-scale-up + Observed Generation: 1 + Reason: Running + Status: True + Type: Running + Last Transition Time: 2024-11-27T11:12:32Z + Message: Horizontal scaling started in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-up + Observed Generation: 1 + Reason: HorizontalScaleStarted + Status: True + Type: HorizontalScale + Last Transition Time: 2024-11-27T11:12:37Z + Message: patch p s; ConditionStatus:True; PodName:pb-horizontal-1 + Observed Generation: 1 + Status: True + Type: PatchPS--pb-horizontal-1 + Last Transition Time: 2024-11-27T11:12:42Z + Message: is pg bouncer running; ConditionStatus:True; PodName:pb-horizontal-1 + Observed Generation: 1 + Status: True + Type: IsPgBouncerRunning--pb-horizontal-1 + Last Transition Time: 2024-11-27T11:12:47Z + Message: patch p s; ConditionStatus:True; PodName:pb-horizontal-2 + Observed Generation: 1 + Status: True + Type: PatchPS--pb-horizontal-2 + Last Transition Time: 2024-11-27T11:12:52Z + Message: is pg bouncer running; ConditionStatus:True; PodName:pb-horizontal-2 + Observed Generation: 1 + Status: True + Type: IsPgBouncerRunning--pb-horizontal-2 + Last Transition Time: 2024-11-27T11:12:57Z + Message: Horizontal scaling Up performed 
successfully in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-up
+    Observed Generation:   1
+    Reason:                HorizontalScaleSucceeded
+    Status:                True
+    Type:                  HorizontalScaleUp
+    Last Transition Time:  2024-11-27T11:13:07Z
+    Message:               Controller has successfully completed with HorizontalScaling of PgBouncerOpsRequest: demo/pgbouncer-horizontal-scale-up
+    Observed Generation:   1
+    Reason:                Successful
+    Status:                True
+    Type:                  Successful
+  Observed Generation:     1
+  Phase:                   Successful
+Events:
+  Type     Reason      Age    From                         Message
+  ----     ------      ----   ----                         -------
+  Normal   Starting    2m13s  KubeDB Ops-manager Operator  Start processing for PgBouncerOpsRequest: demo/pgbouncer-horizontal-scale-up
+  Normal   Starting    2m13s  KubeDB Ops-manager Operator  Pausing PgBouncer databse: demo/pb-horizontal
+  Normal   Successful  2m13s  KubeDB Ops-manager Operator  Successfully paused PgBouncer database: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-up
+  Normal   Starting    2m10s  KubeDB Ops-manager Operator  Horizontal scaling started in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-up
+  Warning  patch p s; ConditionStatus:True; PodName:pb-horizontal-1  2m5s  KubeDB Ops-manager Operator  patch p s; ConditionStatus:True; PodName:pb-horizontal-1
+  Warning  is pg bouncer running; ConditionStatus:True; PodName:pb-horizontal-1  2m  KubeDB Ops-manager Operator  is pg bouncer running; ConditionStatus:True; PodName:pb-horizontal-1
+  Warning  patch p s; ConditionStatus:True; PodName:pb-horizontal-2  115s  KubeDB Ops-manager Operator  patch p s; ConditionStatus:True; PodName:pb-horizontal-2
+  Warning  is pg bouncer running; ConditionStatus:True; PodName:pb-horizontal-2  110s  KubeDB Ops-manager Operator  is pg bouncer running; ConditionStatus:True; PodName:pb-horizontal-2
+  Normal   Successful  105s  KubeDB Ops-manager Operator  Horizontal scaling Up performed successfully in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-up
+  Normal   Starting    95s   KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pb-horizontal
+  Normal   Successful  95s   KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pb-horizontal
+  Normal   Successful  95s   KubeDB Ops-manager Operator  Controller has Successfully scaled the PgBouncer database: demo/pb-horizontal
+```
+
+Now, we are going to verify the number of replicas this pgbouncer has from the PgBouncer object, and the number of pods the PetSet has,
+
+```bash
+$ kubectl get pb -n demo pb-horizontal -o json | jq '.spec.replicas'
+3
+
+$ kubectl get petset -n demo pb-horizontal -o json | jq '.spec.replicas'
+3
+```
+
+From all the above outputs we can see that the pgbouncer now has `3` replicas. That means we have successfully scaled up the replicas of the PgBouncer.
+
+## Scale Down Replicas
+
+Here, we are going to scale down the replicas of the pgbouncer to meet the desired number of replicas.
+
+#### Create PgBouncerOpsRequest
+
+In order to scale down the replicas of the pgbouncer, we have to create a `PgBouncerOpsRequest` CR with our desired replicas.
Below is the YAML of the `PgBouncerOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-horizontal-scale-down + namespace: demo +spec: + type: HorizontalScaling + databaseRef: + name: pb-horizontal + horizontalScaling: + replicas: 2 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing horizontal scaling down operation on `pb-horizontal` pgbouncer. +- `spec.type` specifies that we are performing `HorizontalScaling` on our pgbouncer. +- `spec.horizontalScaling.replicas` specifies the desired replicas after scaling. + +Let's create the `PgBouncerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/scaling/horizontal-scaling-down-ops.yaml +pgbounceropsrequest.ops.kubedb.com/pgbouncer-horizontal-scale-down created +``` + +#### Verify replicas scaled down successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the replicas of `PgBouncer` object and related `PetSet`. + +Let's wait for `PgBouncerOpsRequest` to be `Successful`. Run the following command to watch `PgBouncerOpsRequest` CR, + +```bash +$ watch kubectl get pgbounceropsrequest -n demo +Every 2.0s: kubectl get pgbounceropsrequest -n demo +NAME TYPE STATUS AGE +pgbouncer-horizontal-scale-down HorizontalScaling Successful 75s +``` + +We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest` we will get an overview of the steps that were followed to scale the pgbouncer. + +```bash +$ kubectl describe pgbounceropsrequest -n demo pgbouncer-horizontal-scale-down +Name: pgbouncer-horizontal-scale-down +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgBouncerOpsRequest +Metadata: + Creation Timestamp: 2024-11-27T11:16:05Z + Generation: 1 + Resource Version: 49481 + UID: cf4bc042-8316-4dce-b6a2-60981af7f4db +Spec: + Apply: IfReady + Database Ref: + Name: pb-horizontal + Horizontal Scaling: + Replicas: 2 + Type: HorizontalScaling +Status: + Conditions: + Last Transition Time: 2024-11-27T11:16:05Z + Message: Controller has started to Progress with HorizontalScaling of PgBouncerOpsRequest: demo/pgbouncer-horizontal-scale-down + Observed Generation: 1 + Reason: Running + Status: True + Type: Running + Last Transition Time: 2024-11-27T11:16:08Z + Message: Horizontal scaling started in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-down + Observed Generation: 1 + Reason: HorizontalScaleStarted + Status: True + Type: HorizontalScale + Last Transition Time: 2024-11-27T11:16:13Z + Message: patch p s; ConditionStatus:True; PodName:pb-horizontal-3 + Observed Generation: 1 + Status: True + Type: PatchPS--pb-horizontal-3 + Last Transition Time: 2024-11-27T11:16:18Z + Message: get pod; ConditionStatus:True; PodName:pb-horizontal-2 + Observed Generation: 1 + Status: True + Type: GetPod--pb-horizontal-2 + Last Transition Time: 2024-11-27T11:16:23Z + Message: Horizontal scaling down performed successfully in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-down + Observed Generation: 1 + Reason: HorizontalScaleSucceeded + Status: True + Type: HorizontalScaleDown + Last Transition Time: 2024-11-27T11:16:33Z + Message: Controller has successfully completed with HorizontalScaling of PgBouncerOpsRequest: demo/pgbouncer-horizontal-scale-down + Observed 
Generation:    1
+    Reason:                Successful
+    Status:                True
+    Type:                  Successful
+  Observed Generation:     1
+  Phase:                   Successful
+Events:
+  Type     Reason      Age    From                         Message
+  ----     ------      ----   ----                         -------
+  Normal   Starting    2m38s  KubeDB Ops-manager Operator  Start processing for PgBouncerOpsRequest: demo/pgbouncer-horizontal-scale-down
+  Normal   Starting    2m38s  KubeDB Ops-manager Operator  Pausing PgBouncer databse: demo/pb-horizontal
+  Normal   Successful  2m38s  KubeDB Ops-manager Operator  Successfully paused PgBouncer database: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-down
+  Normal   Starting    2m35s  KubeDB Ops-manager Operator  Horizontal scaling started in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-down
+  Warning  patch p s; ConditionStatus:True; PodName:pb-horizontal-3  2m30s  KubeDB Ops-manager Operator  patch p s; ConditionStatus:True; PodName:pb-horizontal-3
+  Warning  get pod; ConditionStatus:True; PodName:pb-horizontal-2    2m25s  KubeDB Ops-manager Operator  get pod; ConditionStatus:True; PodName:pb-horizontal-2
+  Normal   Successful  2m20s  KubeDB Ops-manager Operator  Horizontal scaling down performed successfully in PgBouncer: demo/pb-horizontal for PgBouncerOpsRequest: pgbouncer-horizontal-scale-down
+  Normal   Starting    2m10s  KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pb-horizontal
+  Normal   Successful  2m10s  KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pb-horizontal
+  Normal   Successful  2m10s  KubeDB Ops-manager Operator  Controller has Successfully scaled the PgBouncer database: demo/pb-horizontal
+```
+
+Now, we are going to verify the number of replicas this pgbouncer has from the PgBouncer object, and the number of pods the PetSet has,
+
+```bash
+$ kubectl get pb -n demo pb-horizontal -o json | jq '.spec.replicas'
+2
+
+$ kubectl get petset -n demo pb-horizontal -o json | jq '.spec.replicas'
+2
+```
+
+From all the above outputs we can see that the pgbouncer now has `2` replicas. That means we have successfully scaled down the replicas of the PgBouncer.
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete pb -n demo pb-horizontal
+kubectl delete pgbounceropsrequest -n demo pgbouncer-horizontal-scale-up pgbouncer-horizontal-scale-down
+```
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/scaling/horizontal-scaling/overview.md b/docs/guides/pgbouncer/scaling/horizontal-scaling/overview.md
new file mode 100644
index 0000000000..44681355b9
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/horizontal-scaling/overview.md
@@ -0,0 +1,54 @@
+---
+title: PgBouncer Horizontal Scaling Overview
+menu:
+  docs_{{ .version }}:
+    identifier: pb-horizontal-scaling-overview
+    name: Overview
+    parent: pb-horizontal-scaling
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# PgBouncer Horizontal Scaling
+
+This guide will give an overview of how the KubeDB Ops-manager operator scales the `PgBouncer` replicas of the PetSet up or down.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+
+## How Horizontal Scaling Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator scales up or down `PgBouncer` database components. Open the image in a new tab to see the enlarged version.
+Fig: Horizontal scaling process of PgBouncer
+
+The Horizontal scaling process consists of the following steps:
+
+1. At first, a user creates a `PgBouncer` Custom Resource (CR).
+
+2. `KubeDB` Provisioner operator watches the `PgBouncer` CR.
+
+3. When the operator finds a `PgBouncer` CR, it creates `PetSet` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to scale the `PetSet` of the `PgBouncer` database, the user creates a `PgBouncerOpsRequest` CR with the desired information.
+
+5. `KubeDB` Ops-manager operator watches the `PgBouncerOpsRequest` CR.
+
+6. When it finds a `PgBouncerOpsRequest` CR, it pauses the `PgBouncer` object which is referred from the `PgBouncerOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `PgBouncer` object during the horizontal scaling process.
+
+7. Then the `KubeDB` Ops-manager operator will scale the related PetSet Pods to reach the expected number of replicas defined in the `PgBouncerOpsRequest` CR.
+
+8. After successfully scaling the replicas of the related PetSet Pods, the `KubeDB` Ops-manager operator updates the number of replicas in the `PgBouncer` object to reflect the updated state.
+
+9. After the successful scaling of the `PgBouncer` replicas, the `KubeDB` Ops-manager operator resumes the `PgBouncer` object so that the `KubeDB` Provisioner operator resumes its usual operations.
+
+In the next docs, we are going to show a step-by-step guide on horizontal scaling of PgBouncer using `PgBouncerOpsRequest` CRD.
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/scaling/vertical-scaling/_index.md b/docs/guides/pgbouncer/scaling/vertical-scaling/_index.md
new file mode 100644
index 0000000000..da67c6d842
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/vertical-scaling/_index.md
@@ -0,0 +1,10 @@
+---
+title: Vertical Scaling
+menu:
+  docs_{{ .version }}:
+    identifier: pb-vertical-scaling
+    name: Vertical Scaling
+    parent: pb-scaling
+    weight: 20
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/scaling/vertical-scaling/overview.md b/docs/guides/pgbouncer/scaling/vertical-scaling/overview.md
new file mode 100644
index 0000000000..77839a6144
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/vertical-scaling/overview.md
@@ -0,0 +1,54 @@
+---
+title: PgBouncer Vertical Scaling Overview
+menu:
+  docs_{{ .version }}:
+    identifier: pb-vertical-scaling-overview
+    name: Overview
+    parent: pb-vertical-scaling
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# PgBouncer Vertical Scaling
+
+This guide will give an overview of how the KubeDB Ops-manager operator updates the resources (for example CPU, memory, etc.) of the `PgBouncer`.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+
+## How Vertical Scaling Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator updates the resources of the `PgBouncer`. Open the image in a new tab to see the enlarged version.
+Fig: Vertical scaling process of PgBouncer
+
+The vertical scaling process consists of the following steps:
+
+1. At first, a user creates a `PgBouncer` Custom Resource (CR).
+
+2. `KubeDB` Provisioner operator watches the `PgBouncer` CR.
+
+3. When the operator finds a `PgBouncer` CR, it creates `PetSet` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to update the resources (for example `CPU`, `Memory` etc.) of the `PgBouncer`, the user creates a `PgBouncerOpsRequest` CR with the desired information.
+
+5. `KubeDB` Ops-manager operator watches the `PgBouncerOpsRequest` CR.
+
+6. When it finds a `PgBouncerOpsRequest` CR, it pauses the `PgBouncer` object which is referred from the `PgBouncerOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `PgBouncer` object during the vertical scaling process.
+
+7. Then the `KubeDB` Ops-manager operator will update the resources of the PetSet to reach the desired state.
+
+8. After the successful update of the resources of the PetSet's replica, the `KubeDB` Ops-manager operator updates the `PgBouncer` object to reflect the updated state.
+
+9. After the successful update of the `PgBouncer` resources, the `KubeDB` Ops-manager operator resumes the `PgBouncer` object so that the `KubeDB` Provisioner operator resumes its usual operations.
+
+In the next docs, we are going to show a step-by-step guide on updating resources of PgBouncer using `PgBouncerOpsRequest` CRD.
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/scaling/vertical-scaling/vertical-ops.md b/docs/guides/pgbouncer/scaling/vertical-scaling/vertical-ops.md
new file mode 100644
index 0000000000..eb3bb8da08
--- /dev/null
+++ b/docs/guides/pgbouncer/scaling/vertical-scaling/vertical-ops.md
@@ -0,0 +1,304 @@
+---
+title: Vertical Scaling PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-vertical-scaling-ops
+    name: VerticalScaling OpsRequest
+    parent: pb-vertical-scaling
+    weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Vertical Scale PgBouncer
+
+This guide will show you how to use the `KubeDB` Ops-manager operator to update the resources of a PgBouncer.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+  - [Vertical Scaling Overview](/docs/guides/pgbouncer/scaling/vertical-scaling/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in [docs/examples/pgbouncer](/docs/examples/pgbouncer) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+## Apply Vertical Scaling on PgBouncer
+
+Here, we are going to deploy a `PgBouncer` using a version supported by the `KubeDB` operator. Then we are going to apply vertical scaling on it.
+
+### Prepare Postgres
+
+Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use a 3-node Postgres cluster named `ha-postgres`.
+
+### Prepare PgBouncer
+
+Now, we are going to deploy a `PgBouncer` with version `1.18.0`.
+
+### Deploy PgBouncer
+
+In this section, we are going to deploy a PgBouncer. Then, in the next section we will update the resources using `PgBouncerOpsRequest` CRD. Below is the YAML of the `PgBouncer` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: PgBouncer
+metadata:
+  name: pb-vertical
+  namespace: demo
+spec:
+  replicas: 1
+  version: "1.18.0"
+  database:
+    syncUsers: true
+    databaseName: "postgres"
+    databaseRef:
+      name: "ha-postgres"
+      namespace: demo
+  connectionPool:
+    poolMode: session
+    port: 5432
+    reservePoolSize: 5
+    maxClientConnections: 87
+    defaultPoolSize: 2
+    minPoolSize: 1
+    authType: md5
+  deletionPolicy: WipeOut
+```
+
+Let's create the `PgBouncer` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/scaling/pb-vertical.yaml
+pgbouncer.kubedb.com/pb-vertical created
+```
+
+Now, wait until `pb-vertical` has status `Ready`. i.e,
+
+```bash
+$ kubectl get pb -n demo
+NAME          TYPE            VERSION   STATUS   AGE
+pb-vertical   kubedb.com/v1   1.18.0    Ready    17s
+```
+
+Let's check the Pod containers resources,
+
+```bash
+$ kubectl get pod -n demo pb-vertical-0 -o json | jq '.spec.containers[].resources'
+{
+  "limits": {
+    "memory": "1Gi"
+  },
+  "requests": {
+    "cpu": "500m",
+    "memory": "1Gi"
+  }
+}
+```
+
+You can see the Pod has the default resources which are assigned by the KubeDB operator.
+
+We are now ready to apply the `PgBouncerOpsRequest` CR to update the resources of this pgbouncer.
+
+### Vertical Scaling
+
+Here, we are going to update the resources of the pgbouncer to meet the desired resources after scaling.
+
+#### Create PgBouncerOpsRequest
+
+In order to update the resources of the pgbouncer, we have to create a `PgBouncerOpsRequest` CR with our desired resources. Below is the YAML of the `PgBouncerOpsRequest` CR that we are going to create,
+
+```yaml
+apiVersion: ops.kubedb.com/v1alpha1
+kind: PgBouncerOpsRequest
+metadata:
+  name: pgbouncer-scale-vertical
+  namespace: demo
+spec:
+  type: VerticalScaling
+  databaseRef:
+    name: pb-vertical
+  verticalScaling:
+    pgbouncer:
+      resources:
+        requests:
+          memory: "2Gi"
+          cpu: "1"
+        limits:
+          memory: "2Gi"
+          cpu: "1"
+  timeout: 5m
+  apply: IfReady
+```
+
+Here,
+
+- `spec.databaseRef.name` specifies that we are performing the vertical scaling operation on `pb-vertical` pgbouncer.
+- `spec.type` specifies that we are performing `VerticalScaling` on our pgbouncer.
+- `spec.verticalScaling.pgbouncer` specifies the desired resources after scaling.
+- Have a look [here](/docs/guides/pgbouncer/concepts/opsrequest.md) at the respective sections to understand the `timeout` & `apply` fields.
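+
+Optionally, you can first validate the manifest against the cluster without actually creating anything, using a plain `kubectl` server-side dry run (this is standard kubectl behavior, not a KubeDB-specific feature):
+
+```bash
+$ kubectl apply --dry-run=server -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/scaling/pb-vertical-ops.yaml
+pgbounceropsrequest.ops.kubedb.com/pgbouncer-scale-vertical created (server dry run)
+```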
+ +Let's create the `PgBouncerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/scaling/pb-vertical-ops.yaml +pgbounceropsrequest.ops.kubedb.com/pgbouncer-scale-vertical created +``` + +#### Verify PgBouncer resources updated successfully + +If everything goes well, `KubeDB` Ops-manager operator will update the resources of `PgBouncer` object and related `PetSet` and `Pods`. + +Let's wait for `PgBouncerOpsRequest` to be `Successful`. Run the following command to watch `PgBouncerOpsRequest` CR, + +```bash +$ kubectl get pgbounceropsrequest -n demo +Every 2.0s: kubectl get pgbounceropsrequest -n demo +NAME TYPE STATUS AGE +pgbouncer-scale-vertical VerticalScaling Successful 3m42s +``` + +We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest` we will get an overview of the steps that were followed to scale the pgbouncer. + +```bash +$ kubectl describe pgbounceropsrequest -n demo pgbouncer-scale-vertical +Name: pgbouncer-scale-vertical +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgBouncerOpsRequest +Metadata: + Creation Timestamp: 2024-11-27T12:35:02Z + Generation: 1 + Resource Version: 55854 + UID: 567e12f9-b561-4fea-af91-1ed9412a0d74 +Spec: + Apply: IfReady + Database Ref: + Name: pb-vertical + Timeout: 5m + Type: VerticalScaling + Vertical Scaling: + Pgbouncer: + Resources: + Limits: + Cpu: 1 + Memory: 2Gi + Requests: + Cpu: 1 + Memory: 2Gi +Status: + Conditions: + Last Transition Time: 2024-11-27T12:35:02Z + Message: Controller has started to Progress with VerticalScaling of PgBouncerOpsRequest: demo/pgbouncer-scale-vertical + Observed Generation: 1 + Reason: Running + Status: True + Type: Running + Last Transition Time: 2024-11-27T12:35:08Z + Message: Successfully updated Petset resource + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-27T12:35:13Z + Message: get pod; ConditionStatus:True; PodName:pb-vertical-0 + Observed Generation: 1 + Status: True + Type: GetPod--pb-vertical-0 + Last Transition Time: 2024-11-27T12:35:13Z + Message: evict pod; ConditionStatus:True; PodName:pb-vertical-0 + Observed Generation: 1 + Status: True + Type: EvictPod--pb-vertical-0 + Last Transition Time: 2024-11-27T12:35:18Z + Message: check replica func; ConditionStatus:True; PodName:pb-vertical-0 + Observed Generation: 1 + Status: True + Type: CheckReplicaFunc--pb-vertical-0 + Last Transition Time: 2024-11-27T12:35:18Z + Message: check pod ready; ConditionStatus:True; PodName:pb-vertical-0 + Observed Generation: 1 + Status: True + Type: CheckPodReady--pb-vertical-0 + Last Transition Time: 2024-11-27T12:35:38Z + Message: check pg bouncer running; ConditionStatus:True; PodName:pb-vertical-0 + Observed Generation: 1 + Status: True + Type: CheckPgBouncerRunning--pb-vertical-0 + Last Transition Time: 2024-11-27T12:35:43Z + Message: Vertical scaling Up performed successfully in PgBouncer: demo/pb-vertical for PgBouncerOpsRequest: pgbouncer-scale-vertical + Observed Generation: 1 + Reason: VerticalScaleSucceeded + Status: True + Type: VerticalScale + Last Transition Time: 2024-11-27T12:35:53Z + Message: Controller has successfully completed with VerticalScaling of PgBouncerOpsRequest: demo/pgbouncer-scale-vertical + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful 
+Events:
+  Type     Reason      Age   From                         Message
+  ----     ------      ----  ----                         -------
+  Normal   Starting    81s   KubeDB Ops-manager Operator  Start processing for PgBouncerOpsRequest: demo/pgbouncer-scale-vertical
+  Normal   Starting    81s   KubeDB Ops-manager Operator  Pausing PgBouncer databse: demo/pb-vertical
+  Normal   Successful  81s   KubeDB Ops-manager Operator  Successfully paused PgBouncer database: demo/pb-vertical for PgBouncerOpsRequest: pgbouncer-scale-vertical
+  Warning  get pod; ConditionStatus:True; PodName:pb-vertical-0  70s  KubeDB Ops-manager Operator  get pod; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  evict pod; ConditionStatus:True; PodName:pb-vertical-0  70s  KubeDB Ops-manager Operator  evict pod; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check replica func; ConditionStatus:True; PodName:pb-vertical-0  65s  KubeDB Ops-manager Operator  check replica func; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check pod ready; ConditionStatus:True; PodName:pb-vertical-0  65s  KubeDB Ops-manager Operator  check pod ready; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check pg bouncer running; ConditionStatus:False; PodName:pb-vertical-0  55s  KubeDB Ops-manager Operator  check pg bouncer running; ConditionStatus:False; PodName:pb-vertical-0
+  Warning  check replica func; ConditionStatus:True; PodName:pb-vertical-0  55s  KubeDB Ops-manager Operator  check replica func; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check pod ready; ConditionStatus:True; PodName:pb-vertical-0  55s  KubeDB Ops-manager Operator  check pod ready; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check replica func; ConditionStatus:True; PodName:pb-vertical-0  45s  KubeDB Ops-manager Operator  check replica func; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check pod ready; ConditionStatus:True; PodName:pb-vertical-0  45s  KubeDB Ops-manager Operator  check pod ready; ConditionStatus:True; PodName:pb-vertical-0
+  Warning  check pg bouncer running; ConditionStatus:True; PodName:pb-vertical-0  45s  KubeDB Ops-manager Operator  check pg bouncer running; ConditionStatus:True; PodName:pb-vertical-0
+  Normal   Successful  40s   KubeDB Ops-manager Operator  Vertical scaling Up performed successfully in PgBouncer: demo/pb-vertical for PgBouncerOpsRequest: pgbouncer-scale-vertical
+  Normal   Starting    30s   KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pb-vertical
+  Normal   Successful  30s   KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pb-vertical
+  Normal   Starting    30s   KubeDB Ops-manager Operator  Resuming PgBouncer database: demo/pb-vertical
+  Normal   Successful  30s   KubeDB Ops-manager Operator  Successfully resumed PgBouncer database: demo/pb-vertical
+  Normal   Successful  30s   KubeDB Ops-manager Operator  Controller has Successfully scaled the PgBouncer database: demo/pb-vertical
+```
+
+Now, we are going to verify from the Pod yaml whether the resources of the pgbouncer have been updated to meet the desired state. Let's check,
+
+```bash
+$ kubectl get pod -n demo pb-vertical-0 -o json | jq '.spec.containers[].resources'
+{
+  "limits": {
+    "cpu": "1",
+    "memory": "2Gi"
+  },
+  "requests": {
+    "cpu": "1",
+    "memory": "2Gi"
+  }
+}
+```
+
+The above output verifies that we have successfully scaled up the resources of the PgBouncer.
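+
+If you also want to confirm that the new values were persisted in the PetSet template (and not just on the live Pod), a query along the following lines should work, assuming the PetSet exposes a StatefulSet-like `spec.template` field,
+
+```bash
+# inspect the container resources recorded in the PetSet pod template
+$ kubectl get petset -n demo pb-vertical -o json \
+    | jq '.spec.template.spec.containers[] | {name, resources}'
+```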
+
+## Cleaning Up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete pb -n demo pb-vertical
+kubectl delete pgbounceropsrequest -n demo pgbouncer-scale-vertical
+```
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/sync-users/_index.md b/docs/guides/pgbouncer/sync-users/_index.md
new file mode 100755
index 0000000000..43e4e2a7a8
--- /dev/null
+++ b/docs/guides/pgbouncer/sync-users/_index.md
@@ -0,0 +1,10 @@
+---
+title: Runtime users sync to PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-sync-users
+    name: Sync Users
+    parent: pb-pgbouncer-guides
+    weight: 30
+menu_name: docs_{{ .version }}
+---
diff --git a/docs/guides/pgbouncer/sync-users/sync-users-pgbouncer.md b/docs/guides/pgbouncer/sync-users/sync-users-pgbouncer.md
new file mode 100644
index 0000000000..8184e0bc8a
--- /dev/null
+++ b/docs/guides/pgbouncer/sync-users/sync-users-pgbouncer.md
@@ -0,0 +1,195 @@
+---
+title: Runtime users sync to PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-sync-users-pgbouncer
+    name: Sync users pgbouncer
+    parent: pb-sync-users
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Using Sync Users
+
+KubeDB provides a way to add or update users in PgBouncer at runtime, simply by creating a secret with defined keys and labels. This tutorial will show you how to use KubeDB to sync a user to PgBouncer at runtime.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Now, install KubeDB cli on your workstation and KubeDB operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- To keep things isolated, this tutorial uses a separate namespace called `demo`. Run the following command to prepare your cluster for this tutorial:
+
+  ```bash
+  $ kubectl create ns demo
+  namespace/demo created
+  ```
+
+> Note: The yaml files used in this tutorial are stored in [docs/examples/pgbouncer](https://github.com/kubedb/docs/tree/{{< param "info.version" >}}/docs/examples/pgbouncer) folder in GitHub repository [kubedb/docs](https://github.com/kubedb/docs).
+
+## Overview
+
+The KubeDB operator allows us to sync additional Postgres users to PgBouncer at runtime by setting `spec.database.syncUsers` to `true`. If this option is true, the KubeDB operator searches for secrets with certain labels in the namespace of the referred Postgres. If a secret has `username` and `password` keys, the KubeDB operator syncs that username and password to PgBouncer. This feature can be used not only to add a user but also to update a user's password.
+
+At first, we need to create a secret that contains a `username` key and a `password` key. Also, we need to add two labels, `app.kubernetes.io/instance` and `app.kubernetes.io/name`. The secret must be created in the same namespace as the referred Postgres. A sample structure of the secret is given below.
+
+Example:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    app.kubernetes.io/instance: ha-postgres
+    app.kubernetes.io/name: postgreses.kubedb.com
+  name: pg-user
+  namespace: demo
+stringData:
+  password: "12345"
+  username: "alice"
+```
+
+- `app.kubernetes.io/instance` should be the same as the appbinding name mentioned in `.spec.database.databaseRef.name`.
+- `app.kubernetes.io/name` should be `postgreses.kubedb.com`.
+- `namespace` should be the same as the namespace mentioned in `.spec.database.databaseRef.namespace`.
+
+Every `20 seconds` the KubeDB operator will sync all such users to PgBouncer.
+
+Secrets provided by users are not managed by KubeDB, and therefore, won't be modified or garbage collected by the KubeDB operator (version 0.13.0 and higher).
+
+### Prepare Postgres
+
+A PgBouncer certainly needs a Postgres server, so prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself. In this tutorial we will use a 3-node Postgres cluster named `ha-postgres`.
+
+### Prepare PgBouncer
+
+Now, we are going to deploy a `PgBouncer` with version `1.23.1`.
+
+### Deploy PgBouncer
+
+Below is the YAML of the `PgBouncer` CR that we are going to create,
+
+```yaml
+apiVersion: kubedb.com/v1
+kind: PgBouncer
+metadata:
+  name: pgbouncer-sync
+  namespace: demo
+spec:
+  version: "1.23.1"
+  replicas: 1
+  database:
+    syncUsers: true
+    databaseName: "postgres"
+    databaseRef:
+      name: "ha-postgres"
+      namespace: demo
+  deletionPolicy: WipeOut
+```
+
+Let's create the `PgBouncer` CR we have shown above,
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/sync-users/pgbouncer-sync.yaml
+pgbouncer.kubedb.com/pgbouncer-sync created
+```
+
+Now, wait until `pgbouncer-sync` has status `Ready`. i.e,
+
+```bash
+$ kubectl get pb -n demo
+NAME             TYPE            VERSION   STATUS   AGE
+pgbouncer-sync   kubedb.com/v1   1.23.1    Ready    41s
+```
+
+### Sync Users
+
+Now, create a secret with the structure defined [here](/docs/guides/pgbouncer/concepts/pgbouncer.md#specsyncusers). Below is the YAML of the `secret` that we are going to create,
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  labels:
+    app.kubernetes.io/instance: ha-postgres
+    app.kubernetes.io/name: postgreses.kubedb.com
+  name: sync-secret
+  namespace: demo
+stringData:
+  password: "12345"
+  username: "john"
+```
+
+Now, create the secret by applying the yaml above.
+
+```bash
+$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/sync-users/secret.yaml
+secret/sync-secret created
+```
+
+Now, after `20 seconds`, you can exec into the pgbouncer pod and check whether the new user is there,
+
+```bash
+$ kubectl exec -it -n demo pgbouncer-sync-0 -- /bin/sh
+/$ cat /var/run/pgbouncer/secret/userlist
+"postgres" "md5AESOmAkfj+zX8zXLm92d6Vup6a5yASiiGScoHNDTIgBwH8="
+"john" "md5AEScbLKDSMb+KVrILhh7XEmyQ=="
+"pgbouncer" "md5AESOmAkfj+zX8zXLm92d6Vup6a5yASiiGScoHNDTIgBwH8="
+/$ exit
+exit
+```
+
+We can see that the user is now present in PgBouncer. The user still has to exist in Postgres itself, so let's create it there and then try to use it through PgBouncer.
+Now, you can connect to this pgbouncer through [psql](https://www.postgresql.org/docs/current/app-psql.html). Before that, we need to port-forward to the primary service of pgbouncer.
+
+```bash
+$ kubectl port-forward svc/pgbouncer-sync -n demo 9999:5432
+Forwarding from 127.0.0.1:9999 -> 5432
+Forwarding from [::1]:9999 -> 5432
+```
+
+We will use the root Postgres user to create the user, so let's get the password for the root user so that we can use it.
+
+```bash
+$ kubectl get secrets -n demo ha-postgres-auth -o jsonpath='{.data.\password}' | base64 -d
+qEeuU6cu5aH!O9CI⏎
+```
+
+We can use this password now,
+
+```bash
+$ export PGPASSWORD='qEeuU6cu5aH!O9CI'
+$ psql --host=localhost --port=9999 --username=postgres postgres
+psql (16.3 (Ubuntu 16.3-1.pgdg22.04+1), server 16.1)
+Type "help" for help.
+
+postgres=# CREATE USER john WITH PASSWORD '12345';
+CREATE ROLE
+postgres=# exit
+```
+
+Now, let's use this john user.
+
+```bash
+$ export PGPASSWORD='12345'
+$ psql --host=localhost --port=9999 --username=john postgres
+psql (16.3 (Ubuntu 16.3-1.pgdg22.04+1), server 16.1)
+Type "help" for help.
+
+postgres=> exit
+```
+
+So we have successfully verified that the user is registered in PgBouncer and that we can use it.
+
+## Cleaning up
+
+To clean up the Kubernetes resources created by this tutorial, run:
+
+```bash
+kubectl delete -n demo pb/pgbouncer-sync
+kubectl delete -n demo secret/sync-secret
+kubectl delete pg -n demo ha-postgres
+kubectl delete ns demo
+```
+
+## Next Steps
+
+- Monitor your PgBouncer database with KubeDB using [out-of-the-box Prometheus operator](/docs/guides/pgbouncer/monitoring/using-prometheus-operator.md).
+- Monitor your PgBouncer database with KubeDB using [out-of-the-box builtin-Prometheus](/docs/guides/pgbouncer/monitoring/using-builtin-prometheus.md).
+- Detail concepts of [PgBouncer object](/docs/guides/pgbouncer/concepts/pgbouncer.md).
+- Detail concepts of [PgBouncerVersion object](/docs/guides/pgbouncer/concepts/catalog.md).
+- Want to hack on KubeDB? Check our [contribution guidelines](/docs/CONTRIBUTING.md).
diff --git a/docs/guides/pgbouncer/update-version/_index.md b/docs/guides/pgbouncer/update-version/_index.md
new file mode 100644
index 0000000000..98ba265037
--- /dev/null
+++ b/docs/guides/pgbouncer/update-version/_index.md
@@ -0,0 +1,10 @@
+---
+title: Updating PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-updating
+    name: Update Version
+    parent: pb-pgbouncer-guides
+    weight: 40
+menu_name: docs_{{ .version }}
+---
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/update-version/overview.md b/docs/guides/pgbouncer/update-version/overview.md
new file mode 100644
index 0000000000..5c6a208dc1
--- /dev/null
+++ b/docs/guides/pgbouncer/update-version/overview.md
@@ -0,0 +1,54 @@
+---
+title: Updating PgBouncer Overview
+menu:
+  docs_{{ .version }}:
+    identifier: pb-updating-overview
+    name: Overview
+    parent: pb-updating
+    weight: 10
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Updating PgBouncer Version Overview
+
+This guide will give you an overview of how the KubeDB Ops-manager operator updates the version of `PgBouncer`.
+
+## Before You Begin
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+
+## How Update Version Process Works
+
+The following diagram shows how the KubeDB Ops-manager operator updates the version of `PgBouncer`. Open the image in a new tab to see the enlarged version.
+Fig: Updating process of PgBouncer
+
+The updating process consists of the following steps:
+
+1. At first, a user creates a `PgBouncer` Custom Resource (CR).
+
+2. `KubeDB` Provisioner operator watches the `PgBouncer` CR.
+
+3. When the operator finds a `PgBouncer` CR, it creates the required number of `PetSets` and related necessary stuff like secrets, services, etc.
+
+4. Then, in order to update the version of the `PgBouncer`, the user creates a `PgBouncerOpsRequest` CR with the desired version.
+
+5. `KubeDB` Ops-manager operator watches the `PgBouncerOpsRequest` CR.
+
+6. When it finds a `PgBouncerOpsRequest` CR, it halts the `PgBouncer` object which is referred from the `PgBouncerOpsRequest`. So, the `KubeDB` Provisioner operator doesn't perform any operations on the `PgBouncer` object during the updating process.
+
+7. By looking at the target version from the `PgBouncerOpsRequest` CR, the `KubeDB` Ops-manager operator updates the image of the `PetSet`.
+
+8. After successfully updating the `PetSet` and its `Pods` images, the `KubeDB` Ops-manager operator updates the image of the `PgBouncer` object to reflect the updated state of the database.
+
+9. After the successful update of the `PgBouncer` object, the `KubeDB` Ops-manager operator resumes the `PgBouncer` object so that the `KubeDB` Provisioner operator can resume its usual operations.
+
+In the next doc, we are going to show a step-by-step guide on updating a PgBouncer using the UpdateVersion operation.
\ No newline at end of file
diff --git a/docs/guides/pgbouncer/update-version/update_version.md b/docs/guides/pgbouncer/update-version/update_version.md
new file mode 100644
index 0000000000..a408d09694
--- /dev/null
+++ b/docs/guides/pgbouncer/update-version/update_version.md
@@ -0,0 +1,275 @@
+---
+title: Updating PgBouncer
+menu:
+  docs_{{ .version }}:
+    identifier: pb-updating-pgbouncer
+    name: Updating PgBouncer
+    parent: pb-updating
+    weight: 20
+menu_name: docs_{{ .version }}
+section_menu_id: guides
+---
+
+> New to KubeDB? Please start [here](/docs/README.md).
+
+# Update Version of PgBouncer
+
+This guide will show you how to use the `KubeDB` Ops-manager operator to update the version of `PgBouncer`.
+
+## Before You Begin
+
+- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. If you do not already have a cluster, you can create one by using [kind](https://kind.sigs.k8s.io/docs/user/quick-start/).
+
+- Install `KubeDB` Provisioner and Ops-manager operator in your cluster following the steps [here](/docs/setup/README.md).
+
+- You should be familiar with the following `KubeDB` concepts:
+  - [PgBouncer](/docs/guides/pgbouncer/concepts/pgbouncer.md)
+  - [PgBouncerOpsRequest](/docs/guides/pgbouncer/concepts/opsrequest.md)
+  - [Updating Overview](/docs/guides/pgbouncer/update-version/overview.md)
+
+To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial.
+
+```bash
+$ kubectl create ns demo
+namespace/demo created
+```
+
+> **Note:** YAML files used in this tutorial are stored in [docs/examples/pgbouncer](/docs/examples/pgbouncer) directory of [kubedb/docs](https://github.com/kubedb/docs) repository.
+
+### Prepare Postgres
+
+Prepare a KubeDB Postgres cluster using this [tutorial](/docs/guides/postgres/clustering/streaming_replication.md), or you can use any externally managed Postgres, but in that case you need to create an [appbinding](/docs/guides/pgbouncer/concepts/appbinding.md) yourself.
In this tutorial, we will use a 3-node Postgres cluster named `ha-postgres`. + +### Prepare PgBouncer + +Now, we are going to deploy a `PgBouncer` with version `1.18.0`. + +### Deploy PgBouncer: + +In this section, we are going to deploy a PgBouncer. Then, in the next section, we will update its version using a `PgBouncerOpsRequest`. Below is the YAML of the `PgBouncer` CR that we are going to create, + +```yaml +apiVersion: kubedb.com/v1 +kind: PgBouncer +metadata: + name: pb-update + namespace: demo +spec: + replicas: 1 + version: "1.18.0" + database: + syncUsers: true + databaseName: "postgres" + databaseRef: + name: "ha-postgres" + namespace: demo + connectionPool: + poolMode: session + port: 5432 + reservePoolSize: 5 + maxClientConnections: 87 + defaultPoolSize: 2 + minPoolSize: 1 + authType: md5 + deletionPolicy: WipeOut +``` + +Let's create the `PgBouncer` CR we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/update-version/pb-update.yaml +pgbouncer.kubedb.com/pb-update created +``` + +Now, wait until `pb-update` has status `Ready`, i.e., + +```bash +$ kubectl get pb -n demo + NAME TYPE VERSION STATUS AGE + pb-update kubedb.com/v1 1.18.0 Ready 26s +``` + +We are now ready to apply the `PgBouncerOpsRequest` CR to update this PgBouncer. + +### Update PgBouncer Version + +Here, we are going to update `PgBouncer` from `1.18.0` to `1.23.1`. + +#### Create PgBouncerOpsRequest: + +In order to update the PgBouncer, we have to create a `PgBouncerOpsRequest` CR with the desired version that is supported by `KubeDB`. Below is the YAML of the `PgBouncerOpsRequest` CR that we are going to create, + +```yaml +apiVersion: ops.kubedb.com/v1alpha1 +kind: PgBouncerOpsRequest +metadata: + name: pgbouncer-version-update + namespace: demo +spec: + type: UpdateVersion + databaseRef: + name: pb-update + updateVersion: + targetVersion: 1.23.1 +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing this operation on the `pb-update` PgBouncer. +- `spec.type` specifies that we are going to perform `UpdateVersion` on our PgBouncer. +- `spec.updateVersion.targetVersion` specifies `1.23.1` as the desired version of the PgBouncer. + +Let's create the `PgBouncerOpsRequest` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/pgbouncer/update-version/pbops-update.yaml +pgbounceropsrequest.ops.kubedb.com/pgbouncer-version-update created +``` + +#### Verify PgBouncer version updated successfully: + +If everything goes well, the `KubeDB` Ops-manager operator will update the image of the `PgBouncer` object and the related `PetSet` and `Pods`. + +Let's wait for the `PgBouncerOpsRequest` to be `Successful`. Run the following command to watch the `PgBouncerOpsRequest` CR, + +```bash +$ watch kubectl get pgbounceropsrequest -n demo +Every 2.0s: kubectl get pgbounceropsrequest -n demo +NAME TYPE STATUS AGE +pgbouncer-version-update UpdateVersion Successful 93s +``` + +We can see from the above output that the `PgBouncerOpsRequest` has succeeded. If we describe the `PgBouncerOpsRequest`, we will get an overview of the steps that were followed to update the PgBouncer.
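+ +If you only need the final phase rather than the full description, here is a quick sketch that reads the `status.phase` field visible in the `describe` output below: + +```bash +$ kubectl get pgbounceropsrequest -n demo pgbouncer-version-update -o=jsonpath='{.status.phase}{"\n"}' +Successful +```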
+ +```bash +$ kubectl describe pgbounceropsrequest -n demo pgbouncer-version-update +Name: pgbouncer-version-update +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PgBouncerOpsRequest +Metadata: + Creation Timestamp: 2024-11-27T09:40:03Z + Generation: 1 + Resource Version: 41823 + UID: a53940fd-4d2d-4b4b-8ef1-0419dfbce660 +Spec: + Apply: IfReady + Database Ref: + Name: pb-update + Type: UpdateVersion + Update Version: + Target Version: 1.23.1 +Status: + Conditions: + Last Transition Time: 2024-11-27T09:40:03Z + Message: Controller has started to Progress with UpdateVersion of PgBouncerOpsRequest: demo/pgbouncer-version-update + Observed Generation: 1 + Reason: Running + Status: True + Type: Running + Last Transition Time: 2024-11-27T09:40:08Z + Message: Successfully updated Petset resource + Observed Generation: 1 + Reason: UpdatePetSets + Status: True + Type: UpdatePetSets + Last Transition Time: 2024-11-27T09:40:13Z + Message: get pod; ConditionStatus:True; PodName:pb-update-0 + Observed Generation: 1 + Status: True + Type: GetPod--pb-update-0 + Last Transition Time: 2024-11-27T09:40:13Z + Message: evict pod; ConditionStatus:True; PodName:pb-update-0 + Observed Generation: 1 + Status: True + Type: EvictPod--pb-update-0 + Last Transition Time: 2024-11-27T09:40:18Z + Message: check replica func; ConditionStatus:True; PodName:pb-update-0 + Observed Generation: 1 + Status: True + Type: CheckReplicaFunc--pb-update-0 + Last Transition Time: 2024-11-27T09:40:18Z + Message: check pod ready; ConditionStatus:True; PodName:pb-update-0 + Observed Generation: 1 + Status: True + Type: CheckPodReady--pb-update-0 + Last Transition Time: 2024-11-27T09:40:48Z + Message: check pg bouncer running; ConditionStatus:True; PodName:pb-update-0 + Observed Generation: 1 + Status: True + Type: CheckPgBouncerRunning--pb-update-0 + Last Transition Time: 2024-11-27T09:40:53Z + Message: Restarting all pods performed successfully in PgBouncer: demo/pb-update for PgBouncerOpsRequest: pgbouncer-version-update + Observed Generation: 1 + Reason: RestartPodsSucceeded + Status: True + Type: RestartPods + Last Transition Time: 2024-11-27T09:41:04Z + Message: Successfully updated PgBouncer + Observed Generation: 1 + Reason: UpdateDatabase + Status: True + Type: UpdateDatabase + Last Transition Time: 2024-11-27T09:41:04Z + Message: Successfully version updated + Observed Generation: 1 + Reason: VersionUpdate + Status: True + Type: VersionUpdate + Last Transition Time: 2024-11-27T09:41:04Z + Message: Controller has successfully completed with UpdateVersion of PgBouncerOpsRequest: demo/pgbouncer-version-update + Observed Generation: 1 + Reason: Successful + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 114s KubeDB Ops-manager Operator Start processing for PgBouncerOpsRequest: demo/pgbouncer-version-update + Normal Starting 114s KubeDB Ops-manager Operator Pausing PgBouncer databse: demo/pb-update + Normal Successful 114s KubeDB Ops-manager Operator Successfully paused PgBouncer database: demo/pb-update for PgBouncerOpsRequest: pgbouncer-version-update + Warning get pod; ConditionStatus:True; PodName:pb-update-0 104s KubeDB Ops-manager Operator get pod; ConditionStatus:True; PodName:pb-update-0 + Warning evict pod; ConditionStatus:True; PodName:pb-update-0 104s KubeDB Ops-manager Operator evict pod; ConditionStatus:True; PodName:pb-update-0 + Warning check replica func; 
ConditionStatus:True; PodName:pb-update-0 99s KubeDB Ops-manager Operator check replica func; ConditionStatus:True; PodName:pb-update-0 + Warning check pod ready; ConditionStatus:True; PodName:pb-update-0 99s KubeDB Ops-manager Operator check pod ready; ConditionStatus:True; PodName:pb-update-0 + Warning check pg bouncer running; ConditionStatus:False; PodName:pb-update-0 89s KubeDB Ops-manager Operator check pg bouncer running; ConditionStatus:False; PodName:pb-update-0 + Warning check replica func; ConditionStatus:True; PodName:pb-update-0 89s KubeDB Ops-manager Operator check replica func; ConditionStatus:True; PodName:pb-update-0 + Warning check pod ready; ConditionStatus:True; PodName:pb-update-0 89s KubeDB Ops-manager Operator check pod ready; ConditionStatus:True; PodName:pb-update-0 + Warning check replica func; ConditionStatus:True; PodName:pb-update-0 79s KubeDB Ops-manager Operator check replica func; ConditionStatus:True; PodName:pb-update-0 + Warning check pod ready; ConditionStatus:True; PodName:pb-update-0 79s KubeDB Ops-manager Operator check pod ready; ConditionStatus:True; PodName:pb-update-0 + Warning check replica func; ConditionStatus:True; PodName:pb-update-0 69s KubeDB Ops-manager Operator check replica func; ConditionStatus:True; PodName:pb-update-0 + Warning check pod ready; ConditionStatus:True; PodName:pb-update-0 69s KubeDB Ops-manager Operator check pod ready; ConditionStatus:True; PodName:pb-update-0 + Warning check pg bouncer running; ConditionStatus:True; PodName:pb-update-0 69s KubeDB Ops-manager Operator check pg bouncer running; ConditionStatus:True; PodName:pb-update-0 + Normal Successful 64s KubeDB Ops-manager Operator Restarting all pods performed successfully in PgBouncer: demo/pb-update for PgBouncerOpsRequest: pgbouncer-version-update + Normal Starting 53s KubeDB Ops-manager Operator Resuming PgBouncer database: demo/pb-update + Normal Successful 53s KubeDB Ops-manager Operator Successfully resumed PgBouncer database: demo/pb-update + Normal Successful 53s KubeDB Ops-manager Operator Controller has Successfully updated the version of PgBouncer database: demo/pb-update +``` + +Now, we are going to verify whether the `PgBouncer`, the related `PetSet`, and its `Pods` have the new version image. Let's check, + +```bash +$ kubectl get pb -n demo pb-update -o=jsonpath='{.spec.version}{"\n"}' +1.23.1 + +$ kubectl get petset -n demo pb-update -o=jsonpath='{.spec.template.spec.containers[0].image}{"\n"}' +ghcr.io/kubedb/pgbouncer:1.23.1@sha256:9829a24c60938ab709fe9e039fecd9f0019354edf4e74bfd9e62bb2203e945ee + +$ kubectl get pods -n demo pb-update-0 -o=jsonpath='{.spec.containers[0].image}{"\n"}' +ghcr.io/kubedb/pgbouncer:1.23.1@sha256:9829a24c60938ab709fe9e039fecd9f0019354edf4e74bfd9e62bb2203e945ee +``` + +As you can see from the output above, our `PgBouncer` has been updated to the new version, so the update process completed successfully.
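+ +If you want to double-check the version of the binary actually running inside the pod, here is a small sketch — it assumes the pod's container is named `pgbouncer` and that the image provides the `pgbouncer` binary on its `PATH`: + +```bash +$ kubectl exec -n demo pb-update-0 -c pgbouncer -- pgbouncer --version +# expected to report PgBouncer 1.23.1 +```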
+ +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete pb -n demo pb-update +kubectl delete pgbounceropsrequest -n demo pgbouncer-version-update +``` \ No newline at end of file diff --git a/docs/images/day-2-operation/pgbouncer/autoscaling.png b/docs/images/day-2-operation/pgbouncer/autoscaling.png new file mode 100644 index 0000000000..f1b2ba2f1b Binary files /dev/null and b/docs/images/day-2-operation/pgbouncer/autoscaling.png differ diff --git a/docs/images/day-2-operation/pgbouncer/horizontal-scaling.png b/docs/images/day-2-operation/pgbouncer/horizontal-scaling.png new file mode 100644 index 0000000000..9ae39092c6 Binary files /dev/null and b/docs/images/day-2-operation/pgbouncer/horizontal-scaling.png differ diff --git a/docs/images/day-2-operation/pgbouncer/prometheus-operator.png b/docs/images/day-2-operation/pgbouncer/prometheus-operator.png new file mode 100644 index 0000000000..abcb7e8fb6 Binary files /dev/null and b/docs/images/day-2-operation/pgbouncer/prometheus-operator.png differ diff --git a/docs/images/day-2-operation/pgbouncer/reconfiguring.png b/docs/images/day-2-operation/pgbouncer/reconfiguring.png new file mode 100644 index 0000000000..11c617da0c Binary files /dev/null and b/docs/images/day-2-operation/pgbouncer/reconfiguring.png differ diff --git a/docs/images/day-2-operation/pgbouncer/update-version.svg b/docs/images/day-2-operation/pgbouncer/update-version.svg new file mode 100644 index 0000000000..793adcbc45 --- /dev/null +++ b/docs/images/day-2-operation/pgbouncer/update-version.svg @@ -0,0 +1,103 @@ (SVG markup not shown) diff --git a/docs/images/day-2-operation/pgbouncer/vertical-scaling.svg b/docs/images/day-2-operation/pgbouncer/vertical-scaling.svg new file mode 100644 index 0000000000..e8effa6bdf --- /dev/null +++ b/docs/images/day-2-operation/pgbouncer/vertical-scaling.svg @@ -0,0 +1,104 @@ (SVG markup not shown) diff --git a/docs/images/pgbouncer/monitoring/pb-builtin-prom-target.png index c7f19ea488..882e0b29ce 100644 Binary files a/docs/images/pgbouncer/monitoring/pb-builtin-prom-target.png and b/docs/images/pgbouncer/monitoring/pb-builtin-prom-target.png differ