From e144c0a023ae30a199e8c47c3c139977aa66f3c8 Mon Sep 17 00:00:00 2001 From: souravbiswassanto Date: Sat, 31 Aug 2024 14:24:25 +0600 Subject: [PATCH] temp Signed-off-by: souravbiswassanto --- .../autoscaler/compute/ha-postgres.yaml | 28 + .../autoscaler/compute/pgas-compute.yaml | 24 + .../autoscaler/storage/ha-postgres.yaml | 17 + .../autoscaler/storage/pgas-storage.yaml | 14 + docs/guides/postgres/autoscaler/_index.md | 10 + .../postgres/autoscaler/compute/_index.md | 10 + .../postgres/autoscaler/compute/cluster.md | 536 ++++++++++++++++++ .../postgres/autoscaler/compute/overview.md | 55 ++ .../postgres/autoscaler/storage/_index.md | 10 + .../postgres/autoscaler/storage/cluster.md | 318 +++++++++++ .../postgres/autoscaler/storage/overview.md | 57 ++ 11 files changed, 1079 insertions(+) create mode 100644 docs/examples/postgres/autoscaler/compute/ha-postgres.yaml create mode 100644 docs/examples/postgres/autoscaler/compute/pgas-compute.yaml create mode 100644 docs/examples/postgres/autoscaler/storage/ha-postgres.yaml create mode 100644 docs/examples/postgres/autoscaler/storage/pgas-storage.yaml create mode 100644 docs/guides/postgres/autoscaler/_index.md create mode 100644 docs/guides/postgres/autoscaler/compute/_index.md create mode 100644 docs/guides/postgres/autoscaler/compute/cluster.md create mode 100644 docs/guides/postgres/autoscaler/compute/overview.md create mode 100644 docs/guides/postgres/autoscaler/storage/_index.md create mode 100644 docs/guides/postgres/autoscaler/storage/cluster.md create mode 100644 docs/guides/postgres/autoscaler/storage/overview.md diff --git a/docs/examples/postgres/autoscaler/compute/ha-postgres.yaml b/docs/examples/postgres/autoscaler/compute/ha-postgres.yaml new file mode 100644 index 000000000..fce6d4596 --- /dev/null +++ b/docs/examples/postgres/autoscaler/compute/ha-postgres.yaml @@ -0,0 +1,28 @@ +apiVersion: kubedb.com/v1 +kind: Postgres +metadata: + name: ha-postgres + namespace: demo +spec: + version: "16.1" + replicas: 3 + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + podTemplate: + spec: + containers: + - name: postgres + resources: + requests: + cpu: "200m" + memory: "512Mi" + limits: + cpu: "200m" + memory: "512Mi" + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/postgres/autoscaler/compute/pgas-compute.yaml b/docs/examples/postgres/autoscaler/compute/pgas-compute.yaml new file mode 100644 index 000000000..28f7f9a8c --- /dev/null +++ b/docs/examples/postgres/autoscaler/compute/pgas-compute.yaml @@ -0,0 +1,24 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: PostgresAutoscaler +metadata: + name: pg-as-compute + namespace: demo +spec: + databaseRef: + name: ha-postgres + opsRequestOptions: + timeout: 3m + apply: IfReady + compute: + postgres: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 20 + minAllowed: + cpu: 250m + memory: 1Gi + maxAllowed: + cpu: 1 + memory: 1Gi + containerControlledValues: "RequestsAndLimits" + controlledResources: ["cpu", "memory"] \ No newline at end of file diff --git a/docs/examples/postgres/autoscaler/storage/ha-postgres.yaml b/docs/examples/postgres/autoscaler/storage/ha-postgres.yaml new file mode 100644 index 000000000..6cd1ae9de --- /dev/null +++ b/docs/examples/postgres/autoscaler/storage/ha-postgres.yaml @@ -0,0 +1,17 @@ +apiVersion: kubedb.com/v1 +kind: Postgres +metadata: + name: ha-postgres + namespace: demo +spec: + version: "16.1" + replicas: 3 + 
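+  # 3 replicas => one primary and two standby pods (omit this field for a standalone instance)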
storageType: Durable + storage: + storageClassName: "topolvm-provisioner" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut \ No newline at end of file diff --git a/docs/examples/postgres/autoscaler/storage/pgas-storage.yaml b/docs/examples/postgres/autoscaler/storage/pgas-storage.yaml new file mode 100644 index 000000000..18dac2409 --- /dev/null +++ b/docs/examples/postgres/autoscaler/storage/pgas-storage.yaml @@ -0,0 +1,14 @@ +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: PostgresAutoscaler +metadata: + name: pg-as-st + namespace: demo +spec: + databaseRef: + name: ha-postgres + storage: + postgres: + trigger: "On" + usageThreshold: 20 + scalingThreshold: 20 + expansionMode: "Online" \ No newline at end of file diff --git a/docs/guides/postgres/autoscaler/_index.md b/docs/guides/postgres/autoscaler/_index.md new file mode 100644 index 000000000..3f988ffd1 --- /dev/null +++ b/docs/guides/postgres/autoscaler/_index.md @@ -0,0 +1,10 @@ +--- +title: Autoscaling +menu: + docs_{{ .version }}: + identifier: pg-auto-scaling + name: Autoscaling + parent: pg-postgres-guides + weight: 46 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/postgres/autoscaler/compute/_index.md b/docs/guides/postgres/autoscaler/compute/_index.md new file mode 100644 index 000000000..31a232835 --- /dev/null +++ b/docs/guides/postgres/autoscaler/compute/_index.md @@ -0,0 +1,10 @@ +--- +title: Compute Autoscaling +menu: + docs_{{ .version }}: + identifier: mg-compute-auto-scaling + name: Compute Autoscaling + parent: mg-auto-scaling + weight: 46 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/postgres/autoscaler/compute/cluster.md b/docs/guides/postgres/autoscaler/compute/cluster.md new file mode 100644 index 000000000..e74201357 --- /dev/null +++ b/docs/guides/postgres/autoscaler/compute/cluster.md @@ -0,0 +1,536 @@ +--- +title: Postgres Cluster Autoscaling +menu: + docs_{{ .version }}: + identifier: pg-auto-scaling-cluster + name: Cluster + parent: pg-compute-auto-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Autoscaling the Compute Resource of a Postgres Cluster Database + +This guide will show you how to use `KubeDB` to auto-scale compute resources i.e. cpu and memory of a Postgres cluster database. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community, Ops-Manager and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) + +- You should be familiar with the following `KubeDB` concepts: + - [Postgres](/docs/guides/postgres/concepts/postgres.md) + - [PostgresAutoscaler](/docs/guides/postgres/concepts/autoscaler) + - [PostgresOpsRequest](/docs/guides/postgres/concepts/opsrequest.md) + - [Compute Resource Autoscaling Overview](/docs/guides/postgres/autoscaler/compute/overview.md) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` +## Autoscaling of Cluster Database + +Here, we are going to deploy a `Postgres` Cluster using a supported version by `KubeDB` operator. Then we are going to apply `PostgresAutoscaler` to set up autoscaling. 
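Since the autoscaler's recommendations are computed from container metrics, it is worth confirming that Metrics Server is actually serving data before deploying anything. A quick sanity check (assuming Metrics Server was installed as described in the prerequisites; output will vary by cluster):

```bash
$ kubectl get apiservices v1beta1.metrics.k8s.io
$ kubectl top nodes
```

If both commands succeed, the metrics pipeline is healthy and the recommender will have usage data to work with.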
+ +#### Deploy Postgres Cluster + +In this section, we are going to deploy a Postgres Cluster with version `16.1'`. Then, in the next section we will set up autoscaling for this database using `PostgresAutoscaler` CRD. Below is the YAML of the `Postgres` CR that we are going to create, +> If you want to autoscale Postgres `Standalone`, Just remove the `spec.Replicas` from the below yaml and rest of the steps are same. + +```yaml +apiVersion: kubedb.com/v1 +kind: Postgres +metadata: + name: ha-postgres + namespace: demo +spec: + version: "16.1" + replicas: 3 + storageType: Durable + storage: + storageClassName: "standard" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + podTemplate: + spec: + containers: + - name: postgres + resources: + requests: + cpu: "200m" + memory: "512Mi" + limits: + cpu: "200m" + memory: "512Mi" + deletionPolicy: WipeOut +``` + +Let's create the `Postgres` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/postgres/autoscaler/compute/ha-postgres.yaml +postgres.kubedb.com/ha-postgres created +``` + +Now, wait until `ha-postgres` has status `Ready`. i.e, + +```bash +$ kubectl get postgres -n demo +NAME VERSION STATUS AGE +ha-postgres 16.1 Ready 14m +``` + +Let's check the Pod containers resources, + +```bash +$ kubectl get pod -n demo ha-postgres-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "200m", + "memory": "512Mi" + }, + "requests": { + "cpu": "200m", + "memory": "512Mi" + } +} +``` + +Let's check the Postgres resources, +```bash +$ kubectl get postgres -n demo ha-postgres -o json | jq '.spec.podTemplate.spec.resources' +{ + "limits": { + "cpu": "200m", + "memory": "512Mi" + }, + "requests": { + "cpu": "200m", + "memory": "512Mi" + } +} +``` + +You can see from the above outputs that the resources are same as the one we have assigned while deploying the postgres. + +We are now ready to apply the `PostgresAutoscaler` CRO to set up autoscaling for this database. + +### Compute Resource Autoscaling + +Here, we are going to set up compute resource autoscaling using a PostgresAutoscaler Object. + +#### Create PostgresAutoscaler Object + +In order to set up compute resource autoscaling for this database cluster, we have to create a `PostgresAutoscaler` CRO with our desired configuration. Below is the YAML of the `PostgresAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: PostgresAutoscaler +metadata: + name: pg-as-compute + namespace: demo +spec: + databaseRef: + name: ha-postgres + opsRequestOptions: + timeout: 3m + apply: IfReady + compute: + postgres: + trigger: "On" + podLifeTimeThreshold: 5m + resourceDiffPercentage: 20 + minAllowed: + cpu: 250m + memory: 1Gi + maxAllowed: + cpu: 1 + memory: 1Gi + containerControlledValues: "RequestsAndLimits" + controlledResources: ["cpu", "memory"] +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing compute resource scaling operation on `ha-postgres` database. +- `spec.compute.postgres.trigger` specifies that compute autoscaling is enabled for this database. +- `spec.compute.postgres.podLifeTimeThreshold` specifies the minimum lifetime for at least one of the pod to initiate a vertical scaling. +- `spec.compute.postgres.resourceDiffPercentage` specifies the minimum resource difference in percentage. The default is 10%. 
+ If the difference between current & recommended resource is less than ResourceDiffPercentage, Autoscaler Operator will ignore the updating. +- `spec.compute.postgres.minAllowed` specifies the minimum allowed resources for the database. +- `spec.compute.postgres.maxAllowed` specifies the maximum allowed resources for the database. +- `spec.compute.postgres.controlledResources` specifies the resources that are controlled by the autoscaler. +- `spec.compute.postgres.containerControlledValues` specifies which resource values should be controlled. The default is "RequestsAndLimits". +- `spec.opsRequestOptions.apply` has two supported value : `IfReady` & `Always`. + Use `IfReady` if you want to process the opsReq only when the database is Ready. And use `Always` if you want to process the execution of opsReq irrespective of the Database state. +- `spec.opsRequestOptions.timeout` specifies the maximum time for each step of the opsRequest(in seconds). + If a step doesn't finish within the specified timeout, the ops request will result in failure. + + +Let's create the `PostgresAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/postgres/autoscaler/compute/pgas-compute.yaml +postgresautoscaler.autoscaling.kubedb.com/pgas-compute created +``` + +#### Verify Autoscaling is set up successfully + +Let's check that the `postgresautoscaler` resource is created successfully, + +```bash +$ kubectl get postgresautoscaler -n demo +NAME AGE +pg-as-compute 5m56s + +$ kubectl describe postgresautoscaler pg-as-compute -n demo +Name: pg-as-compute +Namespace: demo +Labels: +Annotations: +API Version: autoscaling.kubedb.com/v1alpha1 +Kind: PostgresAutoscaler +Metadata: + Creation Timestamp: 2022-09-16T11:26:58Z + Generation: 1 + Managed Fields: + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:annotations: + .: + f:kubectl.kubernetes.io/last-applied-configuration: + f:spec: + .: + f:compute: + .: + f:postgres: + .: + f:containerControlledValues: + f:controlledResources: + f:maxAllowed: + .: + f:cpu: + f:memory: + f:minAllowed: + .: + f:cpu: + f:memory: + f:podLifeTimeThreshold: + f:resourceDiffPercentage: + f:trigger: + f:databaseRef: + .: + f:name: + f:opsRequestOptions: + .: + f:apply: + f:timeout: + Manager: kubectl-client-side-apply + Operation: Update + Time: 2022-09-16T11:26:58Z + API Version: autoscaling.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:checkpoints: + f:conditions: + f:vpas: + Manager: kubedb-autoscaler + Operation: Update + Subresource: status + Time: 2022-09-16T11:27:07Z + Resource Version: 846645 + UID: 44bd46c3-bbc5-4c4a-aff4-00c7f84c6f58 +Spec: + Compute: + Mariadb: + Container Controlled Values: RequestsAndLimits + Controlled Resources: + cpu + memory + Max Allowed: + Cpu: 1 + Memory: 1Gi + Min Allowed: + Cpu: 250m + Memory: 1Gi + Pod Life Time Threshold: 5m0s + Resource Diff Percentage: 20 + Trigger: On + Database Ref: + Name: ha-postgres + Ops Request Options: + Apply: IfReady + Timeout: 3m0s +Status: + Checkpoints: + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 10000 + Index: 46 + Weight: 555 + Reference Timestamp: 2022-09-16T00:00:00Z + Total Weight: 2.648440345821337 + First Sample Start: 2022-09-16T11:26:48Z + Last Sample Start: 2022-09-16T11:32:52Z + Last Update Time: 2022-09-16T11:33:02Z + Memory Histogram: + Bucket Weights: + Index: 1 + Weight: 10000 + Reference Timestamp: 2022-09-17T00:00:00Z + 
Total Weight: 1.391848625060675 + Ref: + Container Name: md-coordinator + Vpa Object Name: ha-postgres + Total Samples Count: 19 + Version: v3 + Cpu Histogram: + Bucket Weights: + Index: 0 + Weight: 10000 + Index: 3 + Weight: 556 + Reference Timestamp: 2022-09-16T00:00:00Z + Total Weight: 2.648440345821337 + First Sample Start: 2022-09-16T11:26:48Z + Last Sample Start: 2022-09-16T11:32:52Z + Last Update Time: 2022-09-16T11:33:02Z + Memory Histogram: + Reference Timestamp: 2022-09-17T00:00:00Z + Ref: + Container Name: postgres + Vpa Object Name: ha-postgres + Total Samples Count: 19 + Version: v3 + Conditions: + Last Transition Time: 2022-09-16T11:27:07Z + Message: Successfully created mariaDBOpsRequest demo/pgops-ha-postgres-6xc1kc + Observed Generation: 1 + Reason: CreateOpsRequest + Status: True + Type: CreateOpsRequest + Vpas: + Conditions: + Last Transition Time: 2022-09-16T11:27:02Z + Status: True + Type: RecommendationProvided + Recommendation: + Container Recommendations: + Container Name: postgres + Lower Bound: + Cpu: 250m + Memory: 1Gi + Target: + Cpu: 250m + Memory: 1Gi + Uncapped Target: + Cpu: 25m + Memory: 262144k + Upper Bound: + Cpu: 1 + Memory: 1Gi + Vpa Name: ha-postgres +Events: + +``` +So, the `postgresautoscaler` resource is created successfully. + +We can verify from the above output that `status.vpas` contains the `RecommendationProvided` condition to true. And in the same time, `status.vpas.recommendation.containerRecommendations` contain the actual generated recommendation. + +Our autoscaler operator continuously watches the recommendation generated and creates an `postgresopsrequest` based on the recommendations, if the database pod resources are needed to scaled up or down. + +Let's watch the `postgresopsrequest` in the demo namespace to see if any `postgresopsrequest` object is created. After some time you'll see that a `postgresopsrequest` will be created based on the recommendation. + +```bash +$ kubectl get postgresopsrequest -n demo +NAME TYPE STATUS AGE +pgops-ha-postgres-6xc1kc VerticalScaling Progressing 7s +``` + +Let's wait for the ops request to become successful. + +```bash +$ kubectl get postgresopsrequest -n demo +NAME TYPE STATUS AGE +pgops-vpa-ha-postgres-z43wc8 VerticalScaling Successful 3m32s +``` + +We can see from the above output that the `PostgresOpsRequest` has succeeded. If we describe the `PostgresOpsRequest` we will get an overview of the steps that were followed to scale the database. 
+ +```bash +$ kubectl describe postgresopsrequest -n demo pgops-vpa-ha-postgres-z43wc8 +Name: pgops-ha-postgres-6xc1kc +Namespace: demo +Labels: +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PostgresOpsRequest +Metadata: + Creation Timestamp: 2022-09-16T11:27:07Z + Generation: 1 + Managed Fields: + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:metadata: + f:ownerReferences: + .: + k:{"uid":"44bd46c3-bbc5-4c4a-aff4-00c7f84c6f58"}: + f:spec: + .: + f:apply: + f:databaseRef: + .: + f:name: + f:timeout: + f:type: + f:verticalScaling: + .: + f:postgres: + .: + f:limits: + .: + f:cpu: + f:memory: + f:requests: + .: + f:cpu: + f:memory: + Manager: kubedb-autoscaler + Operation: Update + Time: 2022-09-16T11:27:07Z + API Version: ops.kubedb.com/v1alpha1 + Fields Type: FieldsV1 + fieldsV1: + f:status: + .: + f:conditions: + f:observedGeneration: + f:phase: + Manager: kubedb-ops-manager + Operation: Update + Subresource: status + Time: 2022-09-16T11:27:07Z + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: PostgresAutoscaler + Name: pg-as-compute + UID: 44bd46c3-bbc5-4c4a-aff4-00c7f84c6f58 + Resource Version: 846324 + UID: c2b30107-c6d3-44bb-adf3-135edc5d615b +Spec: + Apply: IfReady + Database Ref: + Name: ha-postgres + Timeout: 2m0s + Type: VerticalScaling + Vertical Scaling: + Mariadb: + Limits: + Cpu: 250m + Memory: 1Gi + Requests: + Cpu: 250m + Memory: 1Gi +Status: + Conditions: + Last Transition Time: 2022-09-16T11:27:07Z + Message: Controller has started to Progress the PostgresOpsRequest: demo/pgops-ha-postgres-6xc1kc + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-09-16T11:30:42Z + Message: Successfully restarted Postgres pods for PostgresOpsRequest: demo/pgops-ha-postgres-6xc1kc + Observed Generation: 1 + Reason: SuccessfullyRestatedPetSet + Status: True + Type: RestartPetSet + Last Transition Time: 2022-09-16T11:30:47Z + Message: Vertical scale successful for PostgresOpsRequest: demo/pgops-ha-postgres-6xc1kc + Observed Generation: 1 + Reason: SuccessfullyPerformedVerticalScaling + Status: True + Type: VerticalScaling + Last Transition Time: 2022-09-16T11:30:47Z + Message: Controller has successfully scaled the Postgres demo/pgops-ha-postgres-6xc1kc + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 1 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 8m48s KubeDB Enterprise Operator Start processing for PostgresOpsRequest: demo/pgops-ha-postgres-6xc1kc + Normal Starting 8m48s KubeDB Enterprise Operator Pausing Postgres databse: demo/ha-postgres + Normal Successful 8m48s KubeDB Enterprise Operator Successfully paused Postgres database: demo/ha-postgres for PostgresOpsRequest: pgops-ha-postgres-6xc1kc + Normal Starting 8m43s KubeDB Enterprise Operator Restarting Pod: demo/ha-postgres-0 + Normal Starting 7m33s KubeDB Enterprise Operator Restarting Pod: demo/ha-postgres-1 + Normal Starting 6m23s KubeDB Enterprise Operator Restarting Pod: demo/ha-postgres-2 + Normal Successful 5m13s KubeDB Enterprise Operator Successfully restarted Postgres pods for PostgresOpsRequest: demo/pgops-ha-postgres-6xc1kc + Normal Successful 5m8s KubeDB Enterprise Operator Vertical scale successful for PostgresOpsRequest: demo/pgops-ha-postgres-6xc1kc + Normal Starting 5m8s KubeDB Enterprise 
Operator Resuming Postgres database: demo/ha-postgres + Normal Successful 5m8s KubeDB Enterprise Operator Successfully resumed Postgres database: demo/ha-postgres + Normal Successful 5m8s KubeDB Enterprise Operator Controller has Successfully scaled the Postgres database: demo/ha-postgres +``` + +Now, we are going to verify from the Pod, and the Postgres yaml whether the resources of the cluster database has updated to meet up the desired state, Let's check, + +```bash +$ kubectl get pod -n demo ha-postgres-0 -o json | jq '.spec.containers[].resources' +{ + "limits": { + "cpu": "250m", + "memory": "1Gi" + }, + "requests": { + "cpu": "250m", + "memory": "1Gi" + } +} + +$ kubectl get postgres -n demo ha-postgres -o json | jq '.spec.podTemplate.spec.resources' +{ + "limits": { + "cpu": "250m", + "memory": "1Gi" + }, + "requests": { + "cpu": "250m", + "memory": "1Gi" + } +} +``` + + +The above output verifies that we have successfully autoscaled the resources of the Postgres cluster database. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete postgres -n demo ha-postgres +kubectl delete postgresautoscaler -n demo pg-as-compute +kubectl delete ns demo +``` \ No newline at end of file diff --git a/docs/guides/postgres/autoscaler/compute/overview.md b/docs/guides/postgres/autoscaler/compute/overview.md new file mode 100644 index 000000000..4813e2777 --- /dev/null +++ b/docs/guides/postgres/autoscaler/compute/overview.md @@ -0,0 +1,55 @@ +--- +title: Postgres Compute Autoscaling Overview +menu: + docs_{{ .version }}: + identifier: pg-auto-scaling-overview + name: Overview + parent: pg-compute-auto-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Postgres Compute Resource Autoscaling + +This guide will give an overview on how KubeDB Autoscaler operator autoscales the database compute resources i.e. cpu and memory using `postgresautoscaler` crd. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Postgres](/docs/guides/postgres/concepts/postgres.md) + - [PostgresAutoscaler](/docs/guides/postgres/concepts/autoscaler.md) + - [PostgresOpsRequest](/docs/guides/postgres/concepts/opsrequest.md) + +## How Compute Autoscaling Works + +The following diagram shows how KubeDB Autoscaler operator autoscales the resources of `Postgres` database components. Open the image in a new tab to see the enlarged version. + +
+  [Figure: Compute Auto Scaling process of Postgres]
+ +The Auto Scaling process consists of the following steps: + +1. At first, a user creates a `Postgres` Custom Resource Object (CRO). + +2. `KubeDB` Provisioner operator watches the `Postgres` CRO. + +3. When the operator finds a `Postgres` CRO, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +4. Then, in order to set up autoscaling of the `Postgres` database the user creates a `PostgresAutoscaler` CRO with desired configuration. + +5. `KubeDB` Autoscaler operator watches the `PostgresAutoscaler` CRO. + +6. `KubeDB` Autoscaler operator generates recommendation using the modified version of kubernetes [official recommender](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler/pkg/recommender) for different components of the database, as specified in the `PostgresAutoscaler` CRO. + +7. If the generated recommendation doesn't match the current resources of the database, then `KubeDB` Autoscaler operator creates a `PostgresOpsRequest` CRO to scale the database to match the recommendation generated. + +8. `KubeDB` Ops-manager operator watches the `PostgresOpsRequest` CRO. + +9. Then the `KubeDB` Ops-manager operator will scale the database component vertically as specified on the `PostgresOpsRequest` CRO. + +In the next docs, we are going to show a step by step guide on Autoscaling of various Postgres database components using `PostgresAutoscaler` CRD. diff --git a/docs/guides/postgres/autoscaler/storage/_index.md b/docs/guides/postgres/autoscaler/storage/_index.md new file mode 100644 index 000000000..4feb575b5 --- /dev/null +++ b/docs/guides/postgres/autoscaler/storage/_index.md @@ -0,0 +1,10 @@ +--- +title: Storage Autoscaling +menu: + docs_{{ .version }}: + identifier: pg-storage-auto-scaling + name: Storage Autoscaling + parent: pg-auto-scaling + weight: 46 +menu_name: docs_{{ .version }} +--- diff --git a/docs/guides/postgres/autoscaler/storage/cluster.md b/docs/guides/postgres/autoscaler/storage/cluster.md new file mode 100644 index 000000000..0217df671 --- /dev/null +++ b/docs/guides/postgres/autoscaler/storage/cluster.md @@ -0,0 +1,318 @@ +--- +title: Postgres Cluster Autoscaling +menu: + docs_{{ .version }}: + identifier: pg-storage-auto-scaling-cluster + name: Cluster + parent: pg-storage-auto-scaling + weight: 20 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Storage Autoscaling of a Postgres Cluster + +This guide will show you how to use `KubeDB` to autoscale the storage of a Postgres Replicaset database. + +## Before You Begin + +- At first, you need to have a Kubernetes cluster, and the `kubectl` command-line tool must be configured to communicate with your cluster. + +- Install `KubeDB` Community, Enterprise and Autoscaler operator in your cluster following the steps [here](/docs/setup/README.md). + +- Install `Metrics Server` from [here](https://github.com/kubernetes-sigs/metrics-server#installation) + +- Install Prometheus from [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) + +- You must have a `StorageClass` that supports volume expansion. 
+ +- You should be familiar with the following `KubeDB` concepts: + - [Postgres](/docs/guides/postgres/concepts/postgres) + - [PostgresAutoscaler](/docs/guides/postgres/concepts/autoscaler) + - [PostgresOpsRequest](/docs/guides/postgres/concepts/opsrequest) + - [Storage Autoscaling Overview](/docs/guides/postgres/autoscaler/storage/overview) + +To keep everything isolated, we are going to use a separate namespace called `demo` throughout this tutorial. + +```bash +$ kubectl create ns demo +namespace/demo created +``` + +## Storage Autoscaling of Cluster Database + +At first verify that your cluster has a storage class, that supports volume expansion. Let's check, + +```bash +$ kubectl get storageclass +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +standard (default) rancher.io/local-path Delete WaitForFirstConsumer false 79m +topolvm-provisioner topolvm.cybozu.com Delete WaitForFirstConsumer true 78m +``` + +We can see from the output the `topolvm-provisioner` storage class has `ALLOWVOLUMEEXPANSION` field as true. So, this storage class supports volume expansion. We can use it. You can install topolvm from [here](https://github.com/topolvm/topolvm) + +Now, we are going to deploy a `Postgres` cluster using a supported version by `KubeDB` operator. Then we are going to apply `PostgresAutoscaler` to set up autoscaling. + +#### Deploy Postgres Cluster + +In this section, we are going to deploy a Postgres cluster database with version `16.1`. Then, in the next section we will set up autoscaling for this database using `PostgresAutoscaler` CRD. Below is the YAML of the `Postgres` CR that we are going to create, + +> If you want to autoscale Postgres `Standalone`, Just remove the `spec.Replicas` from the below yaml and rest of the steps are same. + +```yaml +apiVersion: kubedb.com/v1 +kind: Postgres +metadata: + name: ha-postgres + namespace: demo +spec: + version: "16.1" + replicas: 3 + storageType: Durable + storage: + storageClassName: "topolvm-provisioner" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + deletionPolicy: WipeOut +``` + +Let's create the `Postgres` CRO we have shown above, + +```bash +$ kubectl create -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/postgres/autoscaler/storage/ha-postgres.yaml +postgres.kubedb.com/ha-postgres created +``` + +Now, wait until `ha-postgres` has status `Ready`. i.e, + +```bash +$ kubectl get postgres -n demo +NAME VERSION STATUS AGE +ha-postgres 16.1 Ready 3m46s +``` + +Let's check volume size from petset, and from the persistent volume, + +```bash +$ kubectl get sts -n demo ha-postgres -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1Gi" + +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-43266d76-f280-4cca-bd78-d13660a84db9 1Gi RWO Delete Bound demo/data-ha-postgres-2 topolvm-provisioner 57s +pvc-4a509b05-774b-42d9-b36d-599c9056af37 1Gi RWO Delete Bound demo/data-ha-postgres-0 topolvm-provisioner 58s +pvc-c27eee12-cd86-4410-b39e-b1dd735fc14d 1Gi RWO Delete Bound demo/data-ha-postgres-1 topolvm-provisioner 57s +``` + +You can see the petset has 1GB storage, and the capacity of all the persistent volume is also 1GB. + +We are now ready to apply the `PostgresAutoscaler` CRO to set up storage autoscaling for this database. + +### Storage Autoscaling + +Here, we are going to set up storage autoscaling using a `PostgresAutoscaler` Object. 
+ +#### Create PostgresAutoscaler Object + +In order to set up vertical autoscaling for this cluster database, we have to create a `PostgresAutoscaler` CRO with our desired configuration. Below is the YAML of the `PostgresAutoscaler` object that we are going to create, + +```yaml +apiVersion: autoscaling.kubedb.com/v1alpha1 +kind: PostgresAutoscaler +metadata: + name: pg-as-st + namespace: demo +spec: + databaseRef: + name: ha-postgres + storage: + postgres: + trigger: "On" + usageThreshold: 20 + scalingThreshold: 20 + expansionMode: "Online" +``` + +Here, + +- `spec.databaseRef.name` specifies that we are performing vertical scaling operation on `ha-postgres` database. +- `spec.storage.postgres.trigger` specifies that storage autoscaling is enabled for this database. +- `spec.storage.postgres.usageThreshold` specifies storage usage threshold, if storage usage exceeds `20%` then storage autoscaling will be triggered. +- `spec.storage.postgres.scalingThreshold` specifies the scaling threshold. Storage will be scaled to `20%` of the current amount. +- `spec.storage.postgres.expansionMode` specifies the expansion mode of volume expansion `PostgresOpsRequest` created by `PostgresAutoscaler`. topolvm-provisioner supports online volume expansion so here `expansionMode` is set as "Online". + +Let's create the `PostgresAutoscaler` CR we have shown above, + +```bash +$ kubectl apply -f https://github.com/kubedb/docs/raw/{{< param "info.version" >}}/docs/examples/postgres/autoscaler/storage/pgas-storage.yaml +postgresautoscaler.autoscaling.kubedb.com/pg-as-st created +``` + +#### Storage Autoscaling is set up successfully + +Let's check that the `postgresautoscaler` resource is created successfully, + +```bash +$ kubectl get postgresautoscaler -n demo +NAME AGE +pg-as-st 33s + +$ kubectl describe postgresautoscaler pg-as-st -n demo +Name: pg-as-st +Namespace: demo +Labels: +Annotations: API Version: autoscaling.kubedb.com/v1alpha1 +Kind: PostgresAutoscaler +Metadata: + Creation Timestamp: 2022-01-14T06:08:02Z + Generation: 1 + Managed Fields: + ... + Resource Version: 24009 + UID: 4f45a3b3-fc72-4d04-b52c-a770944311f6 +Spec: + Database Ref: + Name: ha-postgres + Storage: + Mariadb: + Scaling Threshold: 20 + Trigger: On + Usage Threshold: 20 +Events: +``` + +So, the `postgresautoscaler` resource is created successfully. + +Now, for this demo, we are going to manually fill up the persistent volume to exceed the `usageThreshold` using `dd` command to see if storage autoscaling is working or not. + +Let's exec into the database pod and fill the database volume(`/var/pv/data`) using the following commands: + +```bash +$ kubectl exec -it -n demo ha-postgres-0 -- bash +root@ha-postgres-0:/ df -h /var/pv/data +Filesystem Size Used Avail Use% Mounted on +/dev/topolvm/57cd4330-784f-42c1-bf8e-e743241df164 1014M 357M 658M 36% /var/pv/data +root@ha-postgres-0:/ dd if=/dev/zero of=/var/pv/data/file.img bs=500M count=1 +1+0 records in +1+0 records out +524288000 bytes (524 MB, 500 MiB) copied, 0.340877 s, 1.5 GB/s +root@ha-postgres-0:/ df -h /var/pv/data +Filesystem Size Used Avail Use% Mounted on +/dev/topolvm/57cd4330-784f-42c1-bf8e-e743241df164 1014M 857M 158M 85% /var/pv/data +``` + +So, from the above output we can see that the storage usage is 83%, which exceeded the `usageThreshold` 20%. + +Let's watch the `postgresopsrequest` in the demo namespace to see if any `postgresopsrequest` object is created. 
After some time you'll see that a `postgresopsrequest` of type `VolumeExpansion` will be created based on the `scalingThreshold`. + +```bash +$ kubectl get postgresopsrequest -n demo +NAME TYPE STATUS AGE +pgops-ha-postgres-xojkua VolumeExpansion Progressing 15s +``` + +Let's wait for the ops request to become successful. + +```bash +$ kubectl get postgresopsrequest -n demo +NAME TYPE STATUS AGE +pgops-ha-postgres-xojkua VolumeExpansion Successful 97s +``` + +We can see from the above output that the `PostgresOpsRequest` has succeeded. If we describe the `PostgresOpsRequest` we will get an overview of the steps that were followed to expand the volume of the database. + +```bash +$ kubectl describe postgresopsrequest -n demo mops-ha-postgres-xojkua +Name: mops-ha-postgres-xojkua +Namespace: demo +Labels: app.kubernetes.io/component=database + app.kubernetes.io/instance=ha-postgres + app.kubernetes.io/managed-by=kubedb.com + app.kubernetes.io/name=postgress.kubedb.com +Annotations: +API Version: ops.kubedb.com/v1alpha1 +Kind: PostgresOpsRequest +Metadata: + Creation Timestamp: 2022-01-14T06:13:10Z + Generation: 1 + Managed Fields: ... + Owner References: + API Version: autoscaling.kubedb.com/v1alpha1 + Block Owner Deletion: true + Controller: true + Kind: PostgresAutoscaler + Name: pg-as-st + UID: 4f45a3b3-fc72-4d04-b52c-a770944311f6 + Resource Version: 25557 + UID: 90763a49-a03f-407c-a233-fb20c4ab57d7 +Spec: + Database Ref: + Name: ha-postgres + Type: VolumeExpansion + Volume Expansion: + Mariadb: 1594884096 +Status: + Conditions: + Last Transition Time: 2022-01-14T06:13:10Z + Message: Controller has started to Progress the PostgresOpsRequest: demo/mops-ha-postgres-xojkua + Observed Generation: 1 + Reason: OpsRequestProgressingStarted + Status: True + Type: Progressing + Last Transition Time: 2022-01-14T06:14:25Z + Message: Volume Expansion performed successfully in Postgres pod for PostgresOpsRequest: demo/mops-ha-postgres-xojkua + Observed Generation: 1 + Reason: SuccessfullyVolumeExpanded + Status: True + Type: VolumeExpansion + Last Transition Time: 2022-01-14T06:14:25Z + Message: Controller has successfully expand the volume of Postgres demo/mops-ha-postgres-xojkua + Observed Generation: 1 + Reason: OpsRequestProcessedSuccessfully + Status: True + Type: Successful + Observed Generation: 3 + Phase: Successful +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2m58s KubeDB Enterprise Operator Start processing for PostgresOpsRequest: demo/mops-ha-postgres-xojkua + Normal Starting 2m58s KubeDB Enterprise Operator Pausing Postgres databse: demo/ha-postgres + Normal Successful 2m58s KubeDB Enterprise Operator Successfully paused Postgres database: demo/ha-postgres for PostgresOpsRequest: mops-ha-postgres-xojkua + Normal Successful 103s KubeDB Enterprise Operator Volume Expansion performed successfully in Postgres pod for PostgresOpsRequest: demo/mops-ha-postgres-xojkua + Normal Starting 103s KubeDB Enterprise Operator Updating Postgres storage + Normal Successful 103s KubeDB Enterprise Operator Successfully Updated Postgres storage + Normal Starting 103s KubeDB Enterprise Operator Resuming Postgres database: demo/ha-postgres + Normal Successful 103s KubeDB Enterprise Operator Successfully resumed Postgres database: demo/ha-postgres + Normal Successful 103s KubeDB Enterprise Operator Controller has Successfully expand the volume of Postgres: demo/ha-postgres +``` + +Now, we are going to verify from the `Petset`, and the `Persistent Volume` whether the 
volume of the cluster database has expanded to meet the desired state, Let's check, + +```bash +$ kubectl get sts -n demo ha-postgres -o json | jq '.spec.volumeClaimTemplates[].spec.resources.requests.storage' +"1594884096" +$ kubectl get pv -n demo +NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE +pvc-43266d76-f280-4cca-bd78-d13660a84db9 2Gi RWO Delete Bound demo/data-ha-postgres-2 topolvm-provisioner 23m +pvc-4a509b05-774b-42d9-b36d-599c9056af37 2Gi RWO Delete Bound demo/data-ha-postgres-0 topolvm-provisioner 24m +pvc-c27eee12-cd86-4410-b39e-b1dd735fc14d 2Gi RWO Delete Bound demo/data-ha-postgres-1 topolvm-provisioner 23m +``` + +The above output verifies that we have successfully autoscaled the volume of the Postgres cluster database. + +## Cleaning Up + +To clean up the Kubernetes resources created by this tutorial, run: + +```bash +kubectl delete postgres -n demo ha-postgres +kubectl delete postgresautoscaler -n demo pg-as-st +kubectl delete ns demo +``` diff --git a/docs/guides/postgres/autoscaler/storage/overview.md b/docs/guides/postgres/autoscaler/storage/overview.md new file mode 100644 index 000000000..d0a343570 --- /dev/null +++ b/docs/guides/postgres/autoscaler/storage/overview.md @@ -0,0 +1,57 @@ +--- +title: Postgres Storage Autoscaling Overview +menu: + docs_{{ .version }}: + identifier: pg-storage-auto-scaling-overview + name: Overview + parent: pg-storage-auto-scaling + weight: 10 +menu_name: docs_{{ .version }} +section_menu_id: guides +--- + +> New to KubeDB? Please start [here](/docs/README.md). + +# Postgres Vertical Autoscaling + +This guide will give an overview on how KubeDB Autoscaler operator autoscales the database storage using `postgresautoscaler` crd. + +## Before You Begin + +- You should be familiar with the following `KubeDB` concepts: + - [Postgres](/docs/guides/postgres/concepts/postgres.md) + - [PostgresAutoscaler](/docs/guides/postgres/concepts/autoscaler.md) + - [PostgresOpsRequest](/docs/guides/postgres/concepts/opsrequest.md) + +## How Storage Autoscaling Works + +The following diagram shows how KubeDB Autoscaler operator autoscales the resources of `Postgres` database components. Open the image in a new tab to see the enlarged version. + +
+  [Figure: Storage Auto Scaling process of Postgres]
+ + +The Auto Scaling process consists of the following steps: + +1. At first, a user creates a `Postgres` Custom Resource (CR). + +2. `KubeDB` Provisioner operator watches the `Postgres` CR. + +3. When the operator finds a `Postgres` CR, it creates required number of `PetSets` and related necessary stuff like secrets, services, etc. + +- Each PetSet creates a Persistent Volume according to the Volume Claim Template provided in the petset configuration. + +4. Then, in order to set up storage autoscaling of the `Postgres` database the user creates a `PostgresAutoscaler` CRO with desired configuration. + +5. `KubeDB` Autoscaler operator watches the `PostgresAutoscaler` CRO. + +6. `KubeDB` Autoscaler operator continuously watches persistent volumes of the databases to check if it exceeds the specified usage threshold. +- If the usage exceeds the specified usage threshold, then `KubeDB` Autoscaler operator creates a `PostgresOpsRequest` to expand the storage of the database. + +7. `KubeDB` Ops-manager operator watches the `PostgresOpsRequest` CRO. + +8. Then the `KubeDB` Ops-manager operator will expand the storage of the database component as specified on the `PostgresOpsRequest` CRO. + +In the next docs, we are going to show a step by step guide on Autoscaling storage of various Postgres database components using `PostgresAutoscaler` CRD.
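As a preview of what the next doc sets up, the storage section of a `PostgresAutoscaler` only needs a handful of fields. Below is a minimal, illustrative sketch (the object name and threshold values are placeholders; the cluster guide walks through a tested example):

```yaml
apiVersion: autoscaling.kubedb.com/v1alpha1
kind: PostgresAutoscaler
metadata:
  name: pg-storage-autoscaler   # illustrative name
  namespace: demo
spec:
  databaseRef:
    name: ha-postgres           # the Postgres object whose volumes are watched
  storage:
    postgres:
      trigger: "On"             # enable storage autoscaling
      usageThreshold: 80        # trigger expansion once usage crosses 80% of the current size
      scalingThreshold: 50      # grow the volume by 50% of its current size
      expansionMode: "Online"   # the StorageClass/CSI driver must support online expansion
```

Here the Autoscaler operator watches the persistent volumes of `ha-postgres` and, when usage crosses the threshold, creates a `PostgresOpsRequest` of type `VolumeExpansion` that the Ops-manager operator then executes.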