2 ? [0] : []
+ content {
+ name = "container.nodeSelector"
+ value = yamlencode(var.node_selector)
+ }
+ }
+ timeout = 1800
+}
diff --git a/modules/app/README.md b/modules/app/README.md
new file mode 100644
index 0000000..ba85155
--- /dev/null
+++ b/modules/app/README.md
@@ -0,0 +1,137 @@
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.0 |
+| [kubernetes](#requirement\_kubernetes) | >= 2.16.1 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [kubernetes](#provider\_kubernetes) | >= 2.16.1 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [kubernetes_config_map.backend_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.databus_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.filebeat_config_in](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.fusion_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.imageproxy_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.nest_rest_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.openresty_config](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.room_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.scheduler_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.socket_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_config_map.web_server_env](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
+| [kubernetes_deployment.backend_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.databus_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.fusion_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.imageproxy_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.job_admin_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.nest_rest_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.openresty](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.room_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.scheduler_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.socket_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_deployment.web_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/deployment) | resource |
+| [kubernetes_horizontal_pod_autoscaler.databus_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource |
+| [kubernetes_horizontal_pod_autoscaler.fusion_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource |
+| [kubernetes_horizontal_pod_autoscaler.nest_rest_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource |
+| [kubernetes_horizontal_pod_autoscaler.room_server_autoscaler](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/horizontal_pod_autoscaler) | resource |
+| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource |
+| [kubernetes_secret.openresty-extend-ssl-certs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource |
+| [kubernetes_secret.openresty-ssl-certs](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/secret) | resource |
+| [kubernetes_service.backend_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.databus_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.fusion_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.imageproxy_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.job_admin_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.nest_rest_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.openresty_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.room_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.scheduler_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.socket_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_service.web_server](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/service) | resource |
+| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/namespace) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [ai\_server\_sc](#input\_ai\_server\_sc) | ai\_server storage class | `map` | <pre>{<br>  "size": "10Pi",<br>  "volume_attributes": {<br>    "subPath": "ai_server"<br>  }<br>}</pre> | no |
+| [backend\_server\_depends\_on](#input\_backend\_server\_depends\_on) | n/a | `string` | `""` | no |
+| [create\_ns](#input\_create\_ns) | Whether to automatically create namespace | `bool` | `true` | no |
+| [default\_server\_host](#input\_default\_server\_host) | Default route processing service | `string` | `"http://web-server"` | no |
+| [default\_server\_host\_override\_proxy\_host](#input\_default\_server\_host\_override\_proxy\_host) | n/a | `string` | `""` | no |
+| [developers\_redirect\_url](#input\_developers\_redirect\_url) | n/a | `string` | `""` | no |
+| [disallow\_robots](#input\_disallow\_robots) | Whether to disable crawlers | `bool` | `true` | no |
+| [docker\_edition](#input\_docker\_edition) | Deprecated | `string` | `"vika"` | no |
+| [enable\_ssl](#input\_enable\_ssl) | Whether to enable ssl | `bool` | `false` | no |
+| [env](#input\_env) | Global environment variables, merged into every service's .env | `map(any)` | `{}` | no |
+| [envs](#input\_envs) | Per-service environment variables; entries override the global .env for that sub-service | `map` | `{}` | no |
+| [extend\_tls\_data](#input\_extend\_tls\_data) | Extended certificate crt and key contents | `map(any)` | <pre>{<br>  "tls_crt": "",<br>  "tls_domain": "",<br>  "tls_key": ""<br>}</pre> | no |
+| [has\_ai\_server](#input\_has\_ai\_server) | Whether to deploy AI server? | `bool` | `false` | no |
+| [has\_auto\_reloaded\_config\_map](#input\_has\_auto\_reloaded\_config\_map) | Whether to automatically restart pods when a ConfigMap changes? | `bool` | `false` | no |
+| [has\_backend\_server](#input\_has\_backend\_server) | Whether to deploy Java-Api service? | `bool` | `true` | no |
+| [has\_bill\_job\_executor](#input\_has\_bill\_job\_executor) | Whether to deploy the XXL-JOB subscription task executor service? | `bool` | `false` | no |
+| [has\_cron\_job](#input\_has\_cron\_job) | Whether to enable scheduled (cron) jobs? | `bool` | `true` | no |
+| [has\_databus\_server](#input\_has\_databus\_server) | Deploy the databus-server? | `bool` | `true` | no |
+| [has\_dingtalk\_server](#input\_has\_dingtalk\_server) | Whether to deploy DingTalk application integration service? | `bool` | `false` | no |
+| [has\_document\_server](#input\_has\_document\_server) | Whether to deploy the Node-Nest.js-Document-Server service? | `bool` | `false` | no |
+| [has\_extend\_tls](#input\_has\_extend\_tls) | Whether to support extended certificate | `bool` | `false` | no |
+| [has\_fusion\_server](#input\_has\_fusion\_server) | Whether to deploy the Node-Nest.js-Fusion-Api-Server service? | `bool` | `true` | no |
+| [has\_imageproxy\_server](#input\_has\_imageproxy\_server) | Whether to deploy the Go image-cropping (imageproxy) service? | `bool` | `false` | no |
+| [has\_job\_admin\_server](#input\_has\_job\_admin\_server) | Whether to deploy XXL-JOB-Admin service? | `bool` | `false` | no |
+| [has\_load\_balancer](#input\_has\_load\_balancer) | Whether to provision a LoadBalancer; if so, OpenResty exposes its IP | `bool` | `false` | no |
+| [has\_migration\_server](#input\_has\_migration\_server) | Whether to deploy Java-Data Migration Service? | `bool` | `false` | no |
+| [has\_nest\_rest\_server](#input\_has\_nest\_rest\_server) | /dataPack API only; will be removed after the Galaxy version is published | `bool` | `false` | no |
+| [has\_openresty](#input\_has\_openresty) | Whether to deploy an OpenResty gateway (with public IP and load balancing) | `bool` | `true` | no |
+| [has\_openresty\_ofelia\_job](#input\_has\_openresty\_ofelia\_job) | Whether to include a lightweight Ofelia job container? | `bool` | `false` | no |
+| [has\_room\_server](#input\_has\_room\_server) | Whether to deploy the Node-Nest.js-Room-Server service? | `bool` | `true` | no |
+| [has\_scheduler\_server](#input\_has\_scheduler\_server) | Whether to deploy the Node-Nest.js-Scheduler-Server service? | `bool` | `true` | no |
+| [has\_sensors\_filebeat](#input\_has\_sensors\_filebeat) | Whether to enable Sensors data collection | `bool` | `true` | no |
+| [has\_socket\_server](#input\_has\_socket\_server) | Whether to deploy the Node-Nest.js-Socket-Server service? | `bool` | `true` | no |
+| [has\_space\_job\_executor](#input\_has\_space\_job\_executor) | Whether to deploy the XXL-JOB workbench task executor service? | `bool` | `false` | no |
+| [has\_web\_server](#input\_has\_web\_server) | Whether to deploy Web-Server (front-end template) service? | `bool` | `true` | no |
+| [image\_namespace](#input\_image\_namespace) | Container image namespace to use when initializing | `string` | `"vikadata/vika"` | no |
+| [image\_namespaces](#input\_image\_namespaces) | Per-service container image namespaces; entries here override image\_namespace. Convention over configuration is recommended: create corresponding branches in each project instead | `map(any)` | `{}` | no |
+| [image\_pull\_policy](#input\_image\_pull\_policy) | n/a | `string` | `"IfNotPresent"` | no |
+| [image\_tag](#input\_image\_tag) | Container image tag to use when initializing | `string` | `"latest-alpha"` | no |
+| [image\_tags](#input\_image\_tags) | Per-service container image tags; entries here override image\_tag. Convention over configuration is recommended: create corresponding branches in each project and rely on the global image\_tag instead of configuring this. Note that keys use underscores to match Terraform variable naming, e.g. the container service backend-server corresponds to the key backend\_server | `map(any)` | `{}` | no |
+| [is\_wait](#input\_is\_wait) | n/a | `bool` | `true` | no |
+| [job\_admin\_server\_host](#input\_job\_admin\_server\_host) | n/a | `string` | `"job-admin-server"` | no |
+| [lbs\_amap\_secret](#input\_lbs\_amap\_secret) | AMap (Gaode Maps) reverse-proxy security key | `string` | `""` | no |
+| [minio\_host](#input\_minio\_host) | Object storage service address | `string` | `"http://minio.apitable-datacenter:9090"` | no |
+| [namespace](#input\_namespace) | Shared namespace for applications | `string` | `"apitable-app"` | no |
+| [node\_selector](#input\_node\_selector) | Node label selector | `map` | `{}` | no |
+| [openresty\_annotations](#input\_openresty\_annotations) | openresty annotations, used to select the load-balancer specification: slb.s1.small (5k), slb.s3.small (200k), or slb.s3.large (1M) max connections | `map(any)` | <pre>{<br>  "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec": "slb.s1.small"<br>}</pre> | no |
+| [openresty\_extra\_config](#input\_openresty\_extra\_config) | Extra nginx (openresty) configuration, injected at the http block level | `string` | `""` | no |
+| [openresty\_index\_block](#input\_openresty\_index\_block) | Homepage location (URI = /); supports nginx and Lua code blocks | `string` | `""` | no |
+| [openresty\_server\_block](#input\_openresty\_server\_block) | Extra nginx (openresty) configuration, injected at the server block level | `string` | `""` | no |
+| [pricing\_host](#input\_pricing\_host) | pricing server | `string` | `"http://pricing.apitable-mkt"` | no |
+| [public\_assets\_bucket](#input\_public\_assets\_bucket) | n/a | `string` | `"vk-assets-ltd"` | no |
+| [publish\_databus\_server](#input\_publish\_databus\_server) | Publish the databus-server? | `bool` | `true` | no |
+| [pv\_csi](#input\_pv\_csi) | CSI storage driver settings | `map` | <pre>{<br>  "driver": "csi.juicefs.com",<br>  "fs_type": "juicefs",<br>  "namespace": "vika-opsbase",<br>  "node_publish_secret_ref": "juicefs-sc-secret"<br>}</pre> | no |
+| [registry](#input\_registry) | Container registry; defaults to GitHub's ghcr.io. The Vika mirror is ghcr.vikadata.com and the private registry is docker.vika.ltd | `string` | `"ghcr.io"` | no |
+| [resources](#input\_resources) | Per-service resource settings: replicas, CPU, and memory (in Mi). Limits are roughly 2x the configured value, and each environment starts from a minimal default | `any` | `{}` | no |
+| [server\_name](#input\_server\_name) | default domain name | `string` | `"vika.ltd"` | no |
+| [tls\_crt](#input\_tls\_crt) | tls cert body | `string` | `""` | no |
+| [tls\_key](#input\_tls\_key) | tls key body | `string` | `""` | no |
+| [tls\_name](#input\_tls\_name) | tls cert name | `string` | `""` | no |
+| [worker\_processes](#input\_worker\_processes) | nginx (openresty) worker\_processes: the number of CPU cores to use (matched the node's CPU core count on older k8s versions) | `string` | `"auto"` | no |
+
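+## Usage
+
+A minimal invocation sketch; the source path and all values below are illustrative examples, not defaults shipped with the module:
+
+```hcl
+module "app" {
+  source = "./modules/app"
+
+  namespace = "apitable-app"
+  image_tag = "latest-alpha"
+
+  # Per-service entries merge over the module defaults.
+  image_tags = {
+    backend_server = "v1.2.3"
+  }
+
+  resources = {
+    backend_server = {
+      replicas        = "2"
+      requests_memory = "2048Mi"
+    }
+  }
+}
+```
+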
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [ingress\_ip](#output\_ingress\_ip) | Output ingress ip |
+| [ingress\_ip\_alternative](#output\_ingress\_ip\_alternative) | n/a |
+
\ No newline at end of file
diff --git a/modules/app/backend_server.tf b/modules/app/backend_server.tf
new file mode 100644
index 0000000..26054d9
--- /dev/null
+++ b/modules/app/backend_server.tf
@@ -0,0 +1,314 @@
+locals {
+ default_backend_server_resources = {
+ replicas = "1"
+ requests_cpu = "100m"
+ requests_memory = "1024Mi"
+ limits_cpu = "2000m"
+ limits_memory = "4096Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ }
+}
+
+locals {
+ backend_server_resources = merge(local.default_backend_server_resources, lookup(var.resources, "backend_server", {}))
+}
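+// lookup() returns {} when var.resources has no "backend_server" key, so the
+// defaults above apply; any keys that are present override them via merge().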
+
+variable "backend_server_depends_on" {
+ //type = string
+ default = ""
+}
+resource "kubernetes_deployment" "backend_server" {
+ wait_for_rollout = var.is_wait
+ depends_on = [
+ kubernetes_namespace.this,
+ var.backend_server_depends_on
+ ]
+
+ count = var.has_backend_server ? 1 : 0
+ metadata {
+ name = "backend-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "backend-server"
+ }
+
+ }
+
+ spec {
+ replicas = local.backend_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "backend-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "backend-server"
+ # sa-app = "backend-server-filebeat"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
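+ # Hashing the ConfigMap data into a pod-template annotation changes the pod
+ # spec whenever the ConfigMap content changes, which triggers a rolling restart.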
+ "configmap.backend-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.backend_server_env.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+ //The Sensors analytics log directory is stored in an empty_dir {} volume.
+ volume {
+ name = "log-path"
+ empty_dir {}
+ }
+
+ //Configuration of filebeat.
+ volume {
+ name = "filebeat-config-in"
+
+ config_map {
+ name = "filebeat-config-in"
+ }
+ }
+
+ node_selector = var.node_selector
+
+ //Init container: creates the MySQL database schema.
+ init_container {
+ name = "init-db"
+ image = "${var.registry}/${lookup(local.image_namespaces, "init_db")}/init-db:${lookup(local.image_tags, "init_db")}"
+ image_pull_policy = var.image_pull_policy
+ # command = ["sh", "-c", "chmod -R 777 /workdir && echo InitContainerFinished!"]
+ env {
+ name = "DB_HOST"
+ value = lookup(local.env_config, "MYSQL_HOST", "localhost")
+ }
+ env {
+ name = "DB_PORT"
+ value = lookup(local.env_config, "MYSQL_PORT", "3306")
+ }
+ env {
+ name = "DB_NAME"
+ value = lookup(local.env_config, "MYSQL_DATABASE", "")
+ }
+ env {
+ name = "DB_USERNAME"
+ value = lookup(local.env_config, "MYSQL_USERNAME", "root")
+ }
+ env {
+ name = "DB_PASSWORD"
+ value = sensitive(lookup(local.env_config, "MYSQL_PASSWORD", ""))
+ }
+ env {
+ name = "DATABASE_TABLE_PREFIX"
+ value = local.env_config["DATABASE_TABLE_PREFIX"]
+ }
+ env {
+ name = "ACTION"
+ value = "update"
+ }
+
+ }
+ init_container {
+ name = "init-db-enterprise"
+ image = "${var.registry}/${lookup(local.image_namespaces, "init_db")}/init-db-enterprise:${lookup(local.image_tags, "init_db")}"
+ image_pull_policy = var.image_pull_policy
+ # command = ["sh", "-c", "chmod -R 777 /workdir && echo InitContainerFinished!"]
+ env {
+ name = "EDITION"
+ value = lookup(local.backend_server_env, "EDITION", "vika-saas")
+ }
+ env {
+ name = "DB_HOST"
+ value = lookup(local.env_config, "MYSQL_HOST", "localhost")
+ }
+ env {
+ name = "DB_PORT"
+ value = lookup(local.env_config, "MYSQL_PORT", "3306")
+ }
+ env {
+ name = "DB_NAME"
+ value = lookup(local.env_config, "MYSQL_DATABASE", "")
+ }
+ env {
+ name = "DB_USERNAME"
+ value = lookup(local.env_config, "MYSQL_USERNAME", "root")
+ }
+ env {
+ name = "DB_PASSWORD"
+ value = sensitive(lookup(local.env_config, "MYSQL_PASSWORD", ""))
+ }
+ env {
+ name = "DATABASE_TABLE_PREFIX"
+ value = local.env_config["DATABASE_TABLE_PREFIX"]
+ }
+ env {
+ name = "ACTION"
+ value = "update"
+ }
+
+ }
+
+ container {
+ name = "backend-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "backend_server")}/backend-server:${lookup(local.image_tags, "backend_server")}"
+ image_pull_policy = var.image_pull_policy
+ env_from {
+ config_map_ref {
+ name = "backend-server-env"
+ }
+ }
+
+
+ resources {
+ requests = {
+ cpu = local.backend_server_resources["requests_cpu"]
+ memory = local.backend_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.backend_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.backend_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/api/v1/actuator/health/liveness"
+ port = "8081"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 60
+ timeout_seconds = 3
+ period_seconds = 30
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/api/v1/actuator/health/readiness"
+ port = "8081"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 60
+ timeout_seconds = 3
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ startup_probe {
+ http_get {
+ path = "/api/v1/actuator/health/readiness"
+ port = "8081"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 60
+ timeout_seconds = 3
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ #Sensors data log storage location.
+ volume_mount {
+ name = "log-path"
+ mount_path = "/logs/sensors"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ }
+
+ //Filebeat sidecar, collecting Sensors data logs
+ dynamic "container" {
+ for_each = var.has_sensors_filebeat ? [1] : []
+ content {
+ name = "filebeat"
+ image = "${var.registry}/vikadata/beats/filebeat:7.2.0"
+ args = ["-c", "/etc/filebeat.yml", "-e"]
+
+ volume_mount {
+ name = "filebeat-config-in"
+ read_only = true
+ mount_path = "/etc/filebeat.yml"
+ sub_path = "filebeat.yml"
+ }
+
+ volume_mount {
+ name = "log-path"
+ read_only = true
+ mount_path = "/logs/sensors"
+ }
+ }
+ }
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.backend_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.backend_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+
+ //Ignore affinity changes applied outside of Terraform.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "backend_server" {
+ count = var.has_backend_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "backend-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "backend-server-8081-8081"
+ protocol = "TCP"
+ port = 8081
+ target_port = "8081"
+ }
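+ # 8083 is the gRPC port (see BACKEND_GRPC_PORT in backend-server-env).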
+ port {
+ name = "backend-server-8083-8083"
+ protocol = "TCP"
+ port = 8083
+ target_port = "8083"
+ }
+
+ selector = {
+ app = "backend-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
diff --git a/modules/app/backend_server_env.tf b/modules/app/backend_server_env.tf
new file mode 100644
index 0000000..18bb4f7
--- /dev/null
+++ b/modules/app/backend_server_env.tf
@@ -0,0 +1,72 @@
+locals {
+ backend_server_env = merge(local.env_config, {
+ LOGGER_MAX_HISTORY_DAYS = 1
+
+ CORS_ORIGINS = "*"
+ TEMPLATE_SPACE = "spcNTxlv8Drra"
+
+ SOCKET_DOMAIN = "http://socket-server:3001/socket"
+ OPEN_REDIRECT_URI = "https://integration.vika.ltd/api/v1/wechat/open/getQueryAuth"
+ FEISHU_APP_ENABLE = "true"
+
+ SPRINGFOX_ENABLED = "true"
+ SWAGGER_ENABLED = "true"
+ DECORATOR_ENABLED = "true"
+ ZIPKIN_ENABLED = "false"
+
+
+ # GRPC
+ NEST_GRPC_ADDRESS = "static://room-server:3334"
+ BACKEND_GRPC_PORT = "8083"
+
+
+
+ # socketio starter
+ SOCKET_URL = "http://socket-server:3002"
+ SOCKET_RECONNECTION_ATTEMPTS = "10"
+ SOCKET_RECONNECTION_DELAY = "500"
+ SOCKET_TIMEOUT = "5000"
+
+ # oss starter
+ OSS_ENABLED = "true"
+ OSS_TYPE = "aws"
+
+ # IDaaS starter
+ IDAAS_ENABLED = "false"
+
+ # Enterprise Env
+ SESSION_NAMESPACE = "vikadata:session"
+ DEFAULT_LOCALE = "zh-CN"
+ EMAIL_PERSONAL = "维格表"
+
+ # user cooling-off time, unit: day
+ COOLING_OFF_PERIOD = 30
+
+ # scheduler
+ # close user job cron string, run at 0:00 every day
+ CLOSE_PAUSED_USER_CRON = "0 0 0 * * ?"
+ # disable heartbeat job cron
+ HEARTBEAT_CRON = "-"
+
+ }, lookup(var.envs, "backend_server", {}))
+}
+
+resource "kubernetes_config_map" "backend_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "backend-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.backend_server_env
+}
diff --git a/modules/app/config_image_tags.tf b/modules/app/config_image_tags.tf
new file mode 100644
index 0000000..36fec41
--- /dev/null
+++ b/modules/app/config_image_tags.tf
@@ -0,0 +1,34 @@
+//Image settings for all server
+locals {
+ image_tags = merge({
+ backend_server = var.image_tag
+ room_server = var.image_tag
+ web_server = var.image_tag
+ socket_server = var.image_tag
+ migration_server = var.image_tag
+ init_db = var.image_tag
+ space_job_executor = var.image_tag
+ imageproxy_server = var.image_tag
+ init_settings = var.image_tag
+ dingtalk_server = var.image_tag
+ ai_server = var.image_tag
+ databus_server = var.image_tag
+ }, var.image_tags)
+
+ image_namespaces = merge({
+ backend_server = var.image_namespace
+ room_server = var.image_namespace
+ web_server = var.image_namespace
+ socket_server = var.image_namespace
+ migration_server = var.image_namespace
+ init_db = var.image_namespace
+ space_job_executor = var.image_namespace
+ imageproxy_server = var.image_namespace
+ init_settings = var.image_namespace
+ dingtalk_server = var.image_namespace
+ job_admin_server = var.image_namespace
+ openresty = var.image_namespace
+ ai_server = var.image_namespace
+ databus_server = var.image_namespace
+ }, var.image_namespaces)
+}
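+
+// Entries in var.image_tags / var.image_namespaces take precedence over the
+// global var.image_tag / var.image_namespace, e.g. (hypothetical)
+//   image_tags = { backend_server = "v1.2.3" }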
diff --git a/modules/app/config_map_env.tf b/modules/app/config_map_env.tf
new file mode 100644
index 0000000..327a2be
--- /dev/null
+++ b/modules/app/config_map_env.tf
@@ -0,0 +1,42 @@
+locals {
+ //This is the global .env, it will be overwritten by the .env of other sub-services
+ env_config = merge({
+ VIKA_DATA_PATH = "/data/${var.namespace}"
+ ENV = var.namespace
+ NODE_ENV = "production"
+
+ # MySQL
+ MYSQL_HOST = "mysql-server"
+ MYSQL_PORT = "3306"
+ MYSQL_DATABASE = "apitable"
+ MYSQL_USERNAME = "apitable"
+ MYSQL_PASSWORD = "nby9WQX.zuf_dvp@vhw"
+ DATABASE_TABLE_PREFIX = "apitable_"
+
+ # Redis
+ REDIS_HOST = "redis-master.apitable-datacenter"
+ REDIS_PORT = "6379"
+ REDIS_PASSWORD = "UHWCWiuUMVyupqmW4cXV"
+ REDIS_DB = "0"
+
+ # RabbitMQ
+ RABBITMQ_HOST = "rabbitmq-headless.apitable-datacenter"
+ RABBITMQ_PORT = "5672"
+ RABBITMQ_USERNAME = "user"
+ RABBITMQ_PASSWORD = "7r4HVvsrwP4kQjAgj8Jj"
+ RABBITMQ_VHOST = "/"
+
+ # backend + nest
+ SERVER_DOMAIN = ""
+
+ ASSETS_URL = "assets"
+ ASSETS_BUCKET = "assets"
+ EDITION = "apitable-saas"
+
+ CUSTOMER_NAME = null
+ STORAGE_CLASS_NAME = ""
+
+ DATABUS_SERVER_BASE_URL = "http://databus-server:8625"
+
+ }, var.env)
+}
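+
+// Keys set in var.env override the defaults above; per-service maps from
+// var.envs are then merged on top of this global config in each *_env.tf file.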
diff --git a/modules/app/config_map_filebeat.tf b/modules/app/config_map_filebeat.tf
new file mode 100644
index 0000000..909f442
--- /dev/null
+++ b/modules/app/config_map_filebeat.tf
@@ -0,0 +1,28 @@
+resource "kubernetes_config_map" "filebeat_config_in" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "filebeat-config-in"
+ namespace = var.namespace
+
+ labels = {
+ sa-app = "filebeat"
+ feature = "sensors"
+ }
+ }
+
+ data = {
+ "filebeat.yml" = <<-EOT
+ filebeat.inputs:
+ - type: log
+ #Read service_log.* files under the /logs/sensors/backend-server* directories.
+ paths:
+ - /logs/sensors/backend-server*/service_log.*
+
+ output.logstash:
+ #Cluster Intranet Logstash.
+ hosts: ["logstash.vika-datacenter.svc.cluster.local:5044"]
+ EOT
+ }
+}
diff --git a/modules/app/databus_server.tf b/modules/app/databus_server.tf
new file mode 100644
index 0000000..bb99e32
--- /dev/null
+++ b/modules/app/databus_server.tf
@@ -0,0 +1,210 @@
+locals {
+ default_databus_server_resources = {
+ replicas = "1"
+ max_replicas = "10"
+ requests_cpu = "100m"
+ requests_memory = "512Mi"
+ limits_cpu = "1500m"
+ limits_memory = "8192Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ cpu_utilization_percentage = "500"
+ }
+}
+
+locals {
+ databus_server_resources = merge(local.default_databus_server_resources, lookup(var.resources, "databus_server", {}))
+}
+
+resource "kubernetes_deployment" "databus_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_databus_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_deployment.backend_server,
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "databus-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "databus-server"
+ }
+
+ # annotations = {
+ # # "deployment.kubernetes.io/revision" = "23"
+ # "configmap.databus-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.databus_server_env.data)) : "not_enabled"
+ # }
+ }
+
+ spec {
+ replicas = local.databus_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "databus-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "databus-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.databus-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.databus_server_env.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "databus-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "databus_server")}/databus-server:${lookup(local.image_tags, "databus_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "databus-server-env"
+ }
+ }
+
+ resources {
+ requests = {
+ cpu = local.databus_server_resources["requests_cpu"]
+ memory = local.databus_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.databus_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.databus_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/databus"
+ port = "8625"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 45
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/databus"
+ port = "8625"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ startup_probe {
+ http_get {
+ path = "/databus"
+ port = "8625"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.databus_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.databus_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes applied outside of Terraform.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "databus_server" {
+ count = var.has_databus_server ? 1 : 0
+ metadata {
+ name = "databus-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "databus-server-8625-8625"
+ protocol = "TCP"
+ port = 8625
+ target_port = "8625"
+ }
+
+
+ selector = {
+ app = "databus-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
+
+resource "kubernetes_horizontal_pod_autoscaler" "databus_server_autoscaler" {
+ count = var.has_databus_server ? 1 : 0
+ metadata {
+ name = "databus-server-autoscaler-v2beta2"
+ namespace = var.namespace
+ }
+
+ spec {
+ min_replicas = local.databus_server_resources["replicas"]
+ max_replicas = local.databus_server_resources["max_replicas"]
+
+ scale_target_ref {
+ api_version = "apps/v1"
+ kind = "Deployment"
+ name = "databus-server"
+ }
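+ # The target is relative to the container's CPU request (100m by default),
+ # so 500 means scaling out at roughly 500m of actual CPU usage per pod.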
+ target_cpu_utilization_percentage = local.databus_server_resources["cpu_utilization_percentage"]
+ }
+}
diff --git a/modules/app/databus_server_env.tf b/modules/app/databus_server_env.tf
new file mode 100644
index 0000000..494542d
--- /dev/null
+++ b/modules/app/databus_server_env.tf
@@ -0,0 +1,22 @@
+locals {
+ databus_server_env = merge(local.env_config, {
+ WEB_SOCKET_CHANNEL_ENV = var.namespace
+ BACKEND_BASE_URL = "http://backend-server:8081/api/v1/"
+ OSS_HOST = "/assets"
+ }, lookup(var.envs, "databus_server", {}))
+}
+
+resource "kubernetes_config_map" "databus_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "databus-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.databus_server_env
+}
diff --git a/modules/app/fusion_server.tf b/modules/app/fusion_server.tf
new file mode 100644
index 0000000..a0a00d1
--- /dev/null
+++ b/modules/app/fusion_server.tf
@@ -0,0 +1,222 @@
+locals {
+ default_fusion_server_resources = {
+ replicas = "1"
+ max_replicas = "50"
+ requests_cpu = "100m"
+ requests_memory = "512Mi"
+ limits_cpu = "1500m"
+ limits_memory = "8192Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ cpu_utilization_percentage = "500"
+ }
+}
+
+locals {
+ fusion_server_resources = merge(local.default_fusion_server_resources, lookup(var.resources, "fusion_server", {}))
+}
+
+resource "kubernetes_deployment" "fusion_server" {
+ count = var.has_fusion_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_deployment.backend_server,
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "fusion-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "fusion-server"
+ }
+
+ }
+
+ spec {
+ replicas = local.fusion_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "fusion-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "fusion-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.fusion-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.fusion_server_env.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "fusion-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "fusion-server-env"
+ }
+ }
+
+
+ resources {
+ requests = {
+ cpu = local.fusion_server_resources["requests_cpu"]
+ memory = local.fusion_server_resources["requests_memory"] //@add_tf_local
+ }
+
+ limits = {
+ cpu = local.fusion_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.fusion_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 30
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ startup_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ volume_mount {
+ name = "fusion-server-1658224800000"
+ mount_path = "/home/vikadata/packages/room-server/logs"
+ }
+
+ volume_mount {
+ name = "fusion-server-1658224800000"
+ mount_path = "/app/packages/room-server/logs"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ volume {
+ name = "fusion-server-1658224800000"
+ empty_dir {}
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.fusion_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.fusion_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes applied outside of Terraform.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "fusion_server" {
+ count = var.has_fusion_server ? 1 : 0
+ metadata {
+ name = "fusion-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "fusion-server-3333-3333"
+ protocol = "TCP"
+ port = 3333
+ target_port = "3333"
+ }
+
+ selector = {
+ app = "fusion-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
+
+
+resource "kubernetes_horizontal_pod_autoscaler" "fusion_server_autoscaler" {
+ count = var.has_fusion_server ? 1 : 0
+ metadata {
+ name = "fusion-server-autoscaler-v2beta2"
+ namespace = var.namespace
+ }
+
+ spec {
+ min_replicas = local.fusion_server_resources["replicas"]
+ max_replicas = local.fusion_server_resources["max_replicas"]
+
+ scale_target_ref {
+ api_version = "apps/v1"
+ kind = "Deployment"
+ name = "fusion-server"
+ }
+ target_cpu_utilization_percentage = local.fusion_server_resources["cpu_utilization_percentage"]
+
+ }
+}
diff --git a/modules/app/fusion_server_env.tf b/modules/app/fusion_server_env.tf
new file mode 100644
index 0000000..fc866c3
--- /dev/null
+++ b/modules/app/fusion_server_env.tf
@@ -0,0 +1,29 @@
+locals {
+ fusion_server_env = merge(local.env_config, {
+ APPLICATION_NAME = "FUSION_SERVER"
+ INSTANCE_COUNT = "1"
+ LOG_LEVEL = "info"
+ BACKEND_BASE_URL = "http://backend-server:8081/api/v1/"
+ SOCKET_GRPC_URL = "socket-server:3007"
+ OSS_HOST = "/assets"
+ OSS_TYPE = "QNY1"
+ OSS_CACHE_TYPE = ""
+ ZIPKIN_ENABLED = "false"
+ ROBOT_OFFICIAL_SERVICE_SLUG = "vika"
+ }, lookup(var.envs, "fusion_server", {}))
+}
+
+resource "kubernetes_config_map" "fusion_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "fusion-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.fusion_server_env
+}
diff --git a/modules/app/imageproxy_server.tf b/modules/app/imageproxy_server.tf
new file mode 100644
index 0000000..e310b86
--- /dev/null
+++ b/modules/app/imageproxy_server.tf
@@ -0,0 +1,170 @@
+locals {
+ default_imageproxy_server_resources = {
+ replicas = "1"
+ requests_cpu = "500m"
+ requests_memory = "1024Mi"
+ limits_cpu = "1500m"
+ limits_memory = "8192Mi"
+ rolling_update_max_unavailable = "0%"
+ rolling_update_max_surge = "25%"
+ }
+}
+
+locals {
+ imageproxy_server_resources = merge(local.default_imageproxy_server_resources, lookup(var.resources, "imageproxy_server", {}))
+}
+
+resource "kubernetes_deployment" "imageproxy_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_imageproxy_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "imageproxy-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "imageproxy-server"
+ }
+
+ # annotations = {
+ # # "deployment.kubernetes.io/revision" = "23"
+ # # "configmap.imageproxy-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.imageproxy_server_env.data)) : "not_enabled"
+ # }
+ }
+
+ spec {
+ replicas = local.imageproxy_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "imageproxy-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "imageproxy-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.imageproxy-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.imageproxy_server_env[count.index].data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "imageproxy-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "imageproxy_server")}/imageproxy-server:${lookup(local.image_tags, "imageproxy_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "imageproxy-server-env"
+ }
+ }
+
+ resources {
+ requests = {
+ cpu = local.imageproxy_server_resources["requests_cpu"]
+ memory = local.imageproxy_server_resources["requests_memory"]
+ }
+ limits = {
+ cpu = local.imageproxy_server_resources["limits_cpu"]
+ memory = local.imageproxy_server_resources["limits_memory"]
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/metrics"
+ port = "8080"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/metrics"
+ port = "8080"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 5
+ timeout_seconds = 3
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.imageproxy_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.imageproxy_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes applied outside of Terraform.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "imageproxy_server" {
+ count = var.has_imageproxy_server ? 1 : 0
+ metadata {
+ name = "imageproxy-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "imageproxy-server-8080-8080"
+ protocol = "TCP"
+ port = 80
+ target_port = "8080"
+ }
+
+ selector = {
+ app = "imageproxy-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
diff --git a/modules/app/imageproxy_server_env.tf b/modules/app/imageproxy_server_env.tf
new file mode 100644
index 0000000..f806db5
--- /dev/null
+++ b/modules/app/imageproxy_server_env.tf
@@ -0,0 +1,25 @@
+locals {
+ imageproxy_server_env = merge(local.env_config, {
+ BASEURL = "http://minio:9000",
+ TZ = "Asia/Shanghai",
+ IMAGEPROXY_CACHE = "/tmp/imageproxy"
+ }, lookup(var.envs, "imageproxy_server", {}))
+}
+
+resource "kubernetes_config_map" "imageproxy_server_env" {
+
+ count = var.has_imageproxy_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "imageproxy-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.imageproxy_server_env
+}
diff --git a/modules/app/job_admin_server.app.tf b/modules/app/job_admin_server.app.tf
new file mode 100644
index 0000000..dde6ac8
--- /dev/null
+++ b/modules/app/job_admin_server.app.tf
@@ -0,0 +1,124 @@
+# env
+locals {
+ default_job_admin_server_resources = {
+ replicas = "1"
+ requests_cpu = "100m"
+ requests_memory = "512Mi"
+ limits_cpu = "500m"
+ limits_memory = "1024Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ }
+}
+locals {
+ job_admin_server_env = merge(local.env_config, {
+
+ # Job Admin Server + Executor
+ # XXL-JOB access token (the scheduling center and executors must use the same value)
+ JOB_ACCESS_TOKEN = "onJvanLmSE3CqotjNp8hb7WRolpM1pdL"
+ JOB_ADMIN_ADDRESS = "http://job-admin-server:8080/job-admin"
+
+ # SMTP Email push configuration
+ MAIL_HOST = "smtp.feishu.cn"
+ MAIL_USERNAME = "email-server@vikadata.com"
+ MAIL_PASSWORD = "Qwer123456"
+
+ }, lookup(var.envs, "job_admin_server", {}))
+
+ job_admin_server_resources = merge(local.default_job_admin_server_resources, lookup(var.resources, "job_admin_server", {}))
+}
+
+resource "kubernetes_deployment" "job_admin_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_job_admin_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "job-admin-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "job-admin-server"
+ }
+
+ annotations = {
+ # "deployment.kubernetes.io/revision" = "23"
+ }
+ }
+
+ spec {
+ replicas = local.job_admin_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "job-admin-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "job-admin-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ }
+ }
+
+ spec {
+ container {
+ name = "job-admin-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "job_admin_server")}/xxl-job-admin:2.3.0"
+
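+ # The xxl-job-admin image consumes Spring Boot properties through the PARAMS
+ # environment variable; datasource, access-token, and SMTP settings go here.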
+ env {
+ name = "PARAMS"
+ value = "--server.servlet.context-path=/job-admin --spring.datasource.url=jdbc:mysql://${lookup(local.job_admin_server_env, "MYSQL_HOST", "localhost")}:${lookup(local.job_admin_server_env, "MYSQL_PORT", "3306")}/${lookup(local.job_admin_server_env, "MYSQL_DATABASE", "")}?Unicode=true&characterEncoding=UTF-8 --spring.datasource.username=${lookup(local.job_admin_server_env, "MYSQL_USERNAME", "root")} --spring.datasource.password=${sensitive(lookup(local.job_admin_server_env, "MYSQL_PASSWORD", ""))} --xxl.job.accessToken=${lookup(local.job_admin_server_env, "JOB_ACCESS_TOKEN", "")} --spring.mail.host=${lookup(local.job_admin_server_env, "MAIL_HOST", "root")} --spring.mail.port=465 --spring.mail.username=${lookup(local.job_admin_server_env, "MAIL_USERNAME", "root")} --spring.mail.from=${lookup(local.job_admin_server_env, "MAIL_USERNAME", "root")} --spring.mail.password=${lookup(local.job_admin_server_env, "MAIL_PASSWORD", "")} --spring.mail.properties.mail.smtp.socketFactory.port=465"
+ }
+
+ resources {
+ requests = {
+ cpu = local.job_admin_server_resources["requests_cpu"]
+ memory = local.job_admin_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.job_admin_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.job_admin_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.job_admin_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.job_admin_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes applied outside of Terraform.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
diff --git a/modules/app/job_admin_server.svc.tf b/modules/app/job_admin_server.svc.tf
new file mode 100644
index 0000000..5750d9f
--- /dev/null
+++ b/modules/app/job_admin_server.svc.tf
@@ -0,0 +1,30 @@
+
+# server
+resource "kubernetes_service" "job_admin_server" {
+ count = var.has_job_admin_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "job-admin-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "job-admin-server-8080-8080"
+ protocol = "TCP"
+ port = 8080
+ target_port = "8080"
+ }
+
+ selector = {
+ app = "job-admin-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
+
diff --git a/modules/app/nest_rest_server.tf b/modules/app/nest_rest_server.tf
new file mode 100644
index 0000000..8b81d73
--- /dev/null
+++ b/modules/app/nest_rest_server.tf
@@ -0,0 +1,221 @@
+locals {
+ default_nest_rest_server_resources = {
+ replicas = "1"
+ max_replicas = "50"
+ requests_cpu = "100m"
+ requests_memory = "512Mi"
+ limits_cpu = "1500m"
+ limits_memory = "8192Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ cpu_utilization_percentage = "500"
+ }
+}
+
+locals {
+ nest_rest_server_resources = merge(local.default_nest_rest_server_resources, lookup(var.resources, "nest_rest_server", {}))
+}
+
+resource "kubernetes_deployment" "nest_rest_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_nest_rest_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_deployment.backend_server,
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "nest-rest-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "nest-rest-server"
+ }
+
+ annotations = {
+ # "deployment.kubernetes.io/revision" = "1"
+ }
+ }
+
+ spec {
+ replicas = local.nest_rest_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "nest-rest-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "nest-rest-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "nest-rest-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "nest-rest-server-env"
+ }
+ }
+
+ resources {
+ requests = {
+ cpu = local.nest_rest_server_resources["requests_cpu"]
+ memory = local.nest_rest_server_resources["requests_memory"] //@add_tf_local
+ }
+
+ limits = {
+ cpu = local.nest_rest_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.nest_rest_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 45
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ startup_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ volume_mount {
+ name = "nest-rest-server-1658224800000"
+ mount_path = "/home/vikadata/packages/room-server/logs"
+ }
+
+ volume_mount {
+ name = "nest-rest-server-1658224800000"
+ mount_path = "/app/packages/room-server/logs"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ volume {
+ name = "nest-rest-server-1658224800000"
+ empty_dir {}
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.nest_rest_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.nest_rest_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes applied outside of Terraform.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "nest_rest_server" {
+ metadata {
+ name = "nest-rest-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "nest-rest-server-3333-3333"
+ protocol = "TCP"
+ port = 3333
+ target_port = "3333"
+ }
+
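+ # Fall back to the room-server pods when the dedicated nest-rest-server
+ # deployment is disabled.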
+ selector = {
+ app = var.has_nest_rest_server ? "nest-rest-server" : "room-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
+
+resource "kubernetes_horizontal_pod_autoscaler" "nest_rest_server_autoscaler" {
+ count = var.has_nest_rest_server ? 1 : 0
+ metadata {
+ name = "nest-rest-server-autoscaler-v2beta2"
+ namespace = var.namespace
+ }
+
+ spec {
+ min_replicas = local.nest_rest_server_resources["replicas"]
+ max_replicas = local.nest_rest_server_resources["max_replicas"]
+
+ scale_target_ref {
+ api_version = "apps/v1"
+ kind = "Deployment"
+ name = "nest-rest-server"
+ }
+ target_cpu_utilization_percentage = local.nest_rest_server_resources["cpu_utilization_percentage"]
+ }
+}
diff --git a/modules/app/nest_rest_server_env.tf b/modules/app/nest_rest_server_env.tf
new file mode 100644
index 0000000..d0f590b
--- /dev/null
+++ b/modules/app/nest_rest_server_env.tf
@@ -0,0 +1,30 @@
+locals {
+ nest_rest_server_env = merge(local.env_config, {
+ WEB_SOCKET_CHANNEL_ENV = var.namespace
+ APPLICATION_NAME = "NEST_REST_SERVER"
+ INSTANCE_COUNT = "1"
+ LOG_LEVEL = "info"
+ BACKEND_BASE_URL = "http://backend-server:8081/api/v1/"
+ SOCKET_GRPC_URL = "socket-server:3007"
+ OSS_HOST = "/assets"
+ OSS_TYPE = "QNY1"
+ ZIPKIN_ENABLED = "false"
+ ROBOT_OFFICIAL_SERVICE_SLUG = "vika"
+ }, lookup(var.envs, "nest_rest_server", {}))
+}
+
+resource "kubernetes_config_map" "nest_rest_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "nest-rest-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+
+ data = local.nest_rest_server_env
+}
diff --git a/modules/app/openresty.app.tf b/modules/app/openresty.app.tf
new file mode 100644
index 0000000..03ad8c7
--- /dev/null
+++ b/modules/app/openresty.app.tf
@@ -0,0 +1,454 @@
+# env
+locals {
+ default_openresty_resources = {
+ replicas = "1"
+ requests_cpu = "250m"
+ requests_memory = "512Mi"
+ limits_cpu = "2000m"
+ limits_memory = "4096Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ lifecycle_post_start_command = [
+ "/bin/sh", "-c",
+ "ls"
+ ]
+ }
+}
+
+locals {
+ openresty_resources = merge(local.default_openresty_resources, lookup(var.resources, "openresty", {}))
+}
+
+resource "kubernetes_deployment" "openresty" {
+ wait_for_rollout = var.is_wait
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ metadata[0].annotations,
+ spec[0].template[0].metadata[0].annotations
+ ]
+ }
+ count = var.has_openresty ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "openresty"
+ namespace = var.namespace
+
+ labels = {
+ app = "openresty"
+ }
+
+ }
+
+ spec {
+ replicas = local.openresty_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "openresty"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "openresty"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655996168285"
+ "configmap.openresty-config/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.openresty_config.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
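+ # Note: the numbered volumes below all reference the same openresty-config
+ # ConfigMap; each mount just selects a different sub_path from it.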
+ volume {
+ name = "volume-1655994097224"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655994376906"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995785123"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995785124"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995807238"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995833183"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+
+ volume {
+ name = "volume-1655995925041"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995926172"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995964531"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995964532"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995965437"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995965438"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995965439"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-1655995965440"
+
+ config_map {
+ name = "openresty-config"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-ssl-certs"
+
+ secret {
+ secret_name = "openresty-ssl-certs"
+ default_mode = "0644"
+ }
+ }
+
+ volume {
+ name = "volume-extend-ssl-certs"
+
+ secret {
+ secret_name = var.has_extend_tls ? "openresty-extend-ssl-certs" : "openresty-ssl-certs"
+ default_mode = "0644"
+ }
+ }
+
+ container {
+ name = "openresty"
+ # Repackaged image with the resty-http module added.
+ image = "${var.registry}/${lookup(local.image_namespaces, "openresty")}/openresty:1.21.4.1-http-fat"
+
+ resources {
+ requests = {
+ cpu = local.openresty_resources["requests_cpu"] //@add_tf_local
+ memory = local.openresty_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.openresty_resources["limits_cpu"] //@add_tf_local
+ memory = local.openresty_resources["limits_memory"] //@add_tf_local
+ }
+ }
+ # Adds the gray-release (canary) feature for reading large tables
+ lifecycle {
+ post_start {
+ exec {
+ command = local.openresty_resources["lifecycle_post_start_command"]
+ }
+ }
+ }
+
+ volume_mount {
+ name = "volume-1655994097224"
+ mount_path = "/usr/local/openresty/nginx/conf/nginx.conf"
+ sub_path = "nginx.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655994097224"
+ mount_path = "/etc/nginx/conf.d/vhost/ssl_host.conf"
+ sub_path = var.enable_ssl ? "ssl-host.conf" : "ssl-default.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655994097224"
+ mount_path = "/etc/nginx/conf.d/vhost/openresty_extra_config.conf"
+ sub_path = "openresty_extra_config.conf"
+ }
+
+ volume_mount {
+ mount_path = "/etc/nginx/conf.d/certs/tls.crt"
+ name = "volume-ssl-certs"
+ sub_path = "tls.crt"
+ }
+
+
+ volume_mount {
+ mount_path = "/etc/nginx/conf.d/certs/tls.key"
+ name = "volume-ssl-certs"
+ sub_path = "tls.key"
+ }
+
+ volume_mount {
+ mount_path = "/etc/nginx/conf.d/certs/extend-tls.crt"
+ name = "volume-extend-ssl-certs"
+ sub_path = "tls.crt"
+ }
+
+
+ volume_mount {
+ mount_path = "/etc/nginx/conf.d/certs/extend-tls.key"
+ name = "volume-extend-ssl-certs"
+ sub_path = "tls.key"
+ }
+
+ volume_mount {
+ name = "volume-1655994376906"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-socket-server.conf"
+ sub_path = "ups-socket-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995785123"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-room-server.conf"
+ sub_path = "ups-room-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995785124"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-fusion-server.conf"
+ sub_path = "ups-fusion-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995807238"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-backend-server.conf"
+ sub_path = "ups-backend-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995833183"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-web-server.conf"
+ sub_path = "ups-web-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995833183"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-ai-server.conf"
+ sub_path = "ups-ai-server.conf"
+ }
+ volume_mount {
+ name = "volume-1655995965440"
+ mount_path = "/etc/nginx/conf.d/server/ai-server.conf"
+ sub_path = "ai-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995833183"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-databus-server.conf"
+ sub_path = "ups-databus-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965440"
+ mount_path = "/etc/nginx/conf.d/server/databus-server.conf"
+ sub_path = "databus-server.conf"
+ }
+ volume_mount {
+ name = "volume-1655995925041"
+ mount_path = "/etc/nginx/conf.d/server/web-server.conf"
+ sub_path = "web-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995926172"
+ mount_path = "/etc/nginx/conf.d/server/backend-server.conf"
+ sub_path = "backend-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995964531"
+ mount_path = "/etc/nginx/conf.d/server/room-server.conf"
+ sub_path = "room-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995964532"
+ mount_path = "/etc/nginx/conf.d/server/fusion-server.conf"
+ sub_path = "fusion-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965437"
+ mount_path = "/etc/nginx/conf.d/server/socket-server.conf"
+ sub_path = "socket-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965438"
+ mount_path = "/etc/nginx/conf.d/server/job-admin-server.conf"
+ sub_path = "job-admin-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965439"
+ mount_path = "/etc/nginx/conf.d/upstream/ups-job-admin-server.conf"
+ sub_path = "ups-job-admin-server.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965440"
+ mount_path = "/etc/nginx/conf.d/server/lbs-amap.conf"
+ sub_path = "lbs-amap.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965440"
+ mount_path = "/etc/nginx/conf.d/server/static-proxy.conf"
+ sub_path = "static-proxy.conf"
+ }
+
+ volume_mount {
+ name = "volume-1655995965440"
+ mount_path = "/etc/nginx/conf.d/server/imageproxy-server.conf"
+ sub_path = var.has_imageproxy_server ? "imageproxy-server.conf" : "blank.config"
+ }
+
+ volume_mount {
+ name = "volume-1655995965440"
+ mount_path = "/usr/local/openresty/nginx/html/robots.txt"
+ sub_path = var.disallow_robots ? "disable_robots.txt" : "enable_robots.txt"
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/healthz"
+ port = "80"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 5
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/healthz"
+ port = "80"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 5
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = "Always"
+ }
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = "25%"
+ max_surge = "25%"
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+}
diff --git a/modules/app/openresty.configmap.tf b/modules/app/openresty.configmap.tf
new file mode 100644
index 0000000..73c45ea
--- /dev/null
+++ b/modules/app/openresty.configmap.tf
@@ -0,0 +1,696 @@
+resource "kubernetes_config_map" "openresty_config" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "openresty-config"
+ namespace = var.namespace
+ }
+
+ data = {
+ "backend-server.conf" = <<-EOT
+ location /api {
+ proxy_pass http://backend;
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+ proxy_set_header Access-Control-Allow-Headers 'Cookie,Set-Cookie,x-requested-with,content-type';
+ proxy_set_header Access-Control-Allow-Origin $http_origin ;
+ proxy_set_header 'Access-Control-Allow-Credentials' 'true';
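+ # Note: proxy_set_header only affects the request sent upstream; response
+ # CORS headers are normally emitted with add_header, as on the next line.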
+ add_header 'Access-Control-Allow-Methods' 'GET,POST,PUT,OPTIONS';
+
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+
+ location /api/v1/node/readShareInfo {
+ proxy_pass http://backend;
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+ #Open API cross-domain configuration
+ add_header 'Access-Control-Allow-Origin' '*' always;
+ add_header 'Access-Control-Allow-Credentials' 'true' always;
+ add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PATCH' always;
+ add_header 'Access-Control-Allow-Headers' 'Origin, X-Requested-With,Content-Type, Accept' always;
+ if ($request_method = 'OPTIONS' ) {
+ return 204;
+ }
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+
+ EOT
+
+ "nginx.conf" = <<-EOT
+ worker_processes ${var.worker_processes};
+ # Enables the use of JIT for regular expressions to speed-up their processing.
+ pcre_jit on;
+
+ #error_log logs/error.log;
+ #error_log logs/error.log info;
+ #pid logs/nginx.pid;
+
+ events {
+ worker_connections 204800;
+ multi_accept on;
+ }
+ http {
+ include mime.types;
+ default_type application/octet-stream;
+ log_format json_combined escape=json '{"@timestamp":"$time_iso8601",'
+ '"@source":"$server_addr",'
+ '"@nginx_fields":{'
+ '"remote_addr":"$remote_addr",'
+ '"body_bytes_sent":"$body_bytes_sent",'
+ '"request_time":"$request_time",'
+ '"status":"$status",'
+ '"host":"$host",'
+ '"uri":"$uri",'
+ '"server":"$server_name",'
+ '"request_uri":"$request_uri",'
+ '"request_method":"$request_method",'
+ '"http_referrer":"$http_referer",'
+ '"body_bytes_sent":"$body_bytes_sent",'
+ '"http_x_forwarded_for":"$http_x_forwarded_for",'
+ '"http_user_agent":"$http_user_agent",'
+ '"upstream_response_time":"$upstream_response_time",'
+ '"upstream_status":"$upstream_status",'
+ '"upstream_addr":"$upstream_addr"}}';
+
+ access_log /dev/stdout json_combined;
+ server_tokens off;
+
+ # Shared dictionary for the gray-release (canary) Lua logic
+ lua_shared_dict gray 64m;
+
+ #Max size for file upload
+ client_max_body_size 1024m;
+
+ ##cache##
+ proxy_buffer_size 16k;
+ proxy_buffers 4 64k;
+ proxy_busy_buffers_size 128k;
+ proxy_cache_path /tmp/cache levels=1:2 keys_zone=cache_one:200m inactive=1d max_size=3g;
+
+ gzip on;
+ gzip_proxied any;
+ gzip_comp_level 5;
+ gzip_buffers 16 8k;
+ gzip_min_length 1024;
+ gzip_http_version 1.1;
+ gzip_types text/plain application/x-javascript text/css text/javascript application/json application/javascript;
+
+ include /etc/nginx/conf.d/upstream/*.conf;
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+ '' close;
+ }
+
+ server {
+ listen 80 default;
+ server_name 127.0.0.1;
+ charset utf-8;
+ error_page 404 502 503 /404;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Original-URI $request_uri;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+ include /etc/nginx/conf.d/server/*.conf;
+
+ location =/healthz {
+ return 200;
+ access_log off;
+ }
+
+ location =/robots.txt {
+ root /usr/local/openresty/nginx/html;
+ }
+
+ ${var.openresty_server_block}
+ }
+
+ include /etc/nginx/conf.d/vhost/*.conf;
+ }
+ EOT
+
+ #Default SSL configuration: empty when SSL is disabled
+ "ssl-default.conf" = ""
+
+ "ssl-host.conf" = <<-EOT
+ server {
+ listen 443 ssl http2;
+ server_name ${var.server_name};
+
+ ssl_certificate /etc/nginx/conf.d/certs/tls.crt;
+ ssl_certificate_key /etc/nginx/conf.d/certs/tls.key;
+ ssl_session_timeout 1d;
+ ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
+ ssl_session_tickets off;
+
+ # intermediate configuration
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384;
+ ssl_prefer_server_ciphers off;
+
+ charset utf-8;
+
+ error_page 404 502 503 /404;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Original-URI $request_uri;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+ include /etc/nginx/conf.d/server/*.conf;
+
+ ${var.openresty_server_block}
+
+ location =/healthz {
+ return 200;
+ access_log off;
+ }
+
+ location =/robots.txt {
+ root /usr/local/openresty/nginx/html;
+ }
+ }
+ EOT
+
+ "room-server.conf" = <<-EOT
+ location ~* ^/(actuator|nest) {
+ proxy_pass http://nest-rest;
+
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+
+ %{if var.has_document_server}
+ location /document {
+ proxy_pass http://document;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ proxy_set_header X-Nginx-Proxy true;
+ proxy_set_header Host $host:$server_port;
+ proxy_http_version 1.1;
+ proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+ }
+ %{endif}
+
+ %{if var.has_ai_server}
+ location ^~ /nest/v1/ai {
+ proxy_pass http://nest-rest;
+
+ chunked_transfer_encoding off;
+ proxy_buffering off;
+ proxy_cache off;
+ proxy_http_version 1.1;
+ proxy_set_header Connection '';
+
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+ %{endif}
+ EOT
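+ # The %{if}/%{endif} markers above are Terraform template directives: the
+ # nginx blocks they wrap are rendered into the ConfigMap only when the
+ # corresponding feature flag is true.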
+
+ "fusion-server.conf" = <<-EOT
+ location /fusion {
+ proxy_pass http://fusion;
+ proxy_next_upstream error http_502 non_idempotent;
+ proxy_next_upstream_tries 3;
+ error_page 404 503 /404;
+
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+ #Open API cross-domain configuration
+ add_header 'Access-Control-Allow-Origin' '*' always;
+ add_header 'Access-Control-Allow-Credentials' 'true' always;
+ add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS, DELETE, PATCH' always;
+ add_header 'Access-Control-Allow-Headers' 'Authorization,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Content-Range,Range,x-vika-user-agent' always;
+ if ($request_method = 'OPTIONS' ) {
+ return 204;
+ }
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+ EOT
+
+ "socket-server.conf" = <<-EOT
+ location /room {
+ proxy_pass http://socketRoom;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ proxy_set_header X-Nginx-Proxy true;
+ proxy_set_header Host $host:$server_port;
+ proxy_http_version 1.1;
+ proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+ }
+
+ location /notification {
+ proxy_pass http://socket;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ proxy_set_header X-Nginx-Proxy true;
+ proxy_set_header Host $host:$server_port;
+ proxy_http_version 1.1;
+ proxy_connect_timeout 300s;
+ proxy_read_timeout 300s;
+ proxy_send_timeout 300s;
+ }
+ EOT
+
+ "ai-server.conf" = <<-EOT
+ %{if var.has_ai_server}
+ location /ai {
+ proxy_pass http://ai-server;
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+ proxy_set_header Access-Control-Allow-Headers 'Cookie,Set-Cookie,x-requested-with,content-type';
+ proxy_set_header Access-Control-Allow-Origin $http_origin ;
+ proxy_set_header 'Access-Control-Allow-Credentials' 'true';
+ proxy_buffering off;
+ add_header 'Access-Control-Allow-Methods' 'GET,POST,PUT,OPTIONS';
+
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+
+ %{endif}
+ EOT
+
+ "ups-ai-server.conf" = <<-EOT
+ %{if var.has_ai_server}
+ upstream ai-server {
+ server ai-server:8626;
+ }
+ %{endif}
+
+ EOT
+
+ "databus-server.conf" = <<-EOT
+ %{if var.has_databus_server && var.publish_databus_server}
+ location /databus {
+ proxy_pass http://databus-server;
+
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+ location /fusion/v3 {
+ proxy_pass http://databus-server;
+
+ proxy_set_header X-Real-Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Real-PORT $remote_port;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Original-URI $request_uri;
+
+ proxy_connect_timeout 180s;
+ proxy_read_timeout 180s;
+ proxy_send_timeout 180s;
+ }
+
+ %{endif}
+ EOT
+
+ "ups-databus-server.conf" = <<-EOT
+ %{if var.has_databus_server}
+ upstream databus-server {
+ server databus-server:8625;
+ }
+ %{endif}
+
+ EOT
+
+ "job-admin-server.conf" = <<-EOT
+ #The URI matches the context-path set in the service's environment variables, so requests can still be routed to the service after it issues a redirect
+ %{if var.has_job_admin_server}
+ location /job-admin {
+ proxy_pass http://job-admin-server;
+ }
+ %{endif}
+ EOT
+
+ "ups-job-admin-server.conf" = <<-EOT
+ %{if var.has_job_admin_server}
+ upstream job-admin-server {
+ server ${var.job_admin_server_host}:8080;
+ }
+ %{endif}
+ EOT
+
+ "ups-backend-server.conf" = <<-EOT
+ upstream backend {
+ server backend-server:8081;
+ }
+ EOT
+
+ "ups-room-server.conf" = <<-EOT
+ upstream room {
+ server room-server:3333;
+ }
+ upstream nest-rest {
+ server nest-rest-server:3333;
+ }
+ %{if var.has_document_server}
+ upstream document {
+ server document-server:3006;
+ }
+ %{else}
+ upstream document {
+ server room-server:3006;
+ }
+ %{endif}
+ EOT
+
+ "ups-fusion-server.conf" = <<-EOT
+ upstream fusion {
+ server fusion-server:3333 max_fails=0;
+ server fusion-server:3333 max_fails=0;
+ server fusion-server:3333 max_fails=0;
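+ # The single backend is listed three times so that, combined with
+ # proxy_next_upstream_tries 3 in fusion-server.conf, a failed request can be
+ # retried against it up to three times.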
+ }
+
+ EOT
+
+ "ups-socket-server.conf" = <<-EOT
+ upstream socket {
+ server socket-server:3002;
+ }
+ upstream socketRoom {
+ server socket-server:3005;
+ }
+ EOT
+
+ "ups-web-server.conf" = <<-EOT
+ upstream web-server {
+ server web-server:8080;
+ }
+ EOT
+
+ "web-server.conf" = <<-EOT
+ #Default official website path
+ location / {
+ proxy_set_header X-Nginx-Proxy true;
+ proxy_set_header Host %{if var.default_server_host_override_proxy_host != ""}${var.default_server_host_override_proxy_host}%{else}$http_host%{endif};
+ proxy_set_header X-Original-URI $request_uri;
+ proxy_http_version 1.1;
+ proxy_pass ${var.default_server_host};
+
+ ${var.openresty_index_block}
+ }
+
+ #Help Center Search
+ location =/wp-admin/admin-ajax.php {
+ proxy_pass ${var.default_server_host};
+ proxy_set_header Host %{if var.default_server_host_override_proxy_host != ""}${var.default_server_host_override_proxy_host}%{else}$http_host%{endif};
+ proxy_set_header X-Original-URI $request_uri;
+ }
+
+ #Disable path
+ location ~* ^/(wp-admin|wp-login.php|readme.html|xmlrpc.php) {
+ deny all;
+ }
+
+ location =/ {
+ proxy_set_header Host %{if var.default_server_host_override_proxy_host != ""}${var.default_server_host_override_proxy_host}%{else}$http_host%{endif};
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_cache off;
+ set $cms 0;
+ ${var.openresty_index_block}
+
+ if ($uri ~* wp-login.php$){
+ proxy_pass ${var.default_server_host};
+ set $cms 1;
+ }
+ if ($args ~* home=1){
+ proxy_pass ${var.default_server_host};
+ set $cms 1;
+ }
+
+ if ($http_referer ~* "wp-admin"){
+ proxy_pass ${var.default_server_host};
+ set $cms 1;
+ }
+
+ if ($http_user_agent ~* "qihoobot|Baiduspider|Googlebot|Googlebot-Mobile|Googlebot-Image|Mediapartners-Google|Adsbot-Google|Feedfetjauntycher-Google|Yahoo! Slurp|Yahoo! Slurp China|YoudaoBot|Sosospider|Sogouspider|Sogou web spider|MSNBot|ia_archiver|Tomato Bot"){
+ proxy_pass ${var.default_server_host};
+ set $cms 1;
+ }
+
+ if ($cms = 0){
+ %{if var.default_server_host_override_proxy_host != ""~}
+ proxy_pass ${var.default_server_host};
+ %{else}
+ proxy_pass http://web-server;
+ add_header Content-Type text/html;
+ add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
+ expires off;
+ %{endif~}
+ }
+ }
+
+ #Allow embedded routes
+ location ~* ^/(login|share|404|widget-stage|embed) {
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ proxy_connect_timeout 300;
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ chunked_transfer_encoding off;
+ proxy_pass http://web-server;
+ add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
+ }
+
+ #Allow embedded routes and cache
+ location ~* ^/(custom|file) {
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ proxy_connect_timeout 300;
+ proxy_pass http://web-server;
+ add_header 'Cache-Control' 'public, max-age=2592000';
+ proxy_redirect off;
+ proxy_cache cache_one;
+ proxy_cache_valid 200 302 1h;
+ proxy_cache_valid 301 1d;
+ proxy_cache_valid any 1m;
+ expires 7d;
+ }
+
+ #Prevent external sites from embedding routes
+ location ~* ^/(space|user|invite|template|workbench|org|management|notify) {
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ proxy_connect_timeout 300;
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ chunked_transfer_encoding off;
+ proxy_pass http://web-server;
+ add_header 'Cache-Control' 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0';
+ add_header 'X-Frame-Options' 'SAMEORIGIN';
+ }
+
+ location /_next {
+ proxy_pass http://web-server;
+ proxy_connect_timeout 300;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ }
+
+ # Static resources (config_map_kong will overwrite this in saas environment)
+ location /web_build {
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_pass http://web-server/web_build;
+ add_header 'Cache-Control' 'public, max-age=2592000';
+ proxy_redirect off;
+ proxy_cache cache_one;
+ proxy_cache_valid 200 302 1h;
+ proxy_cache_valid 301 1d;
+ proxy_cache_valid any 1m;
+ expires 7d;
+ }
+ EOT
+
+ "lbs-amap.conf" = <<-EOT
+ %{if var.lbs_amap_secret != ""~}
+ location /_AMapService/v4/map/styles {
+ set $args "$args&jscode=${var.lbs_amap_secret}";
+ proxy_pass https://webapi.amap.com/v4/map/styles;
+ }
+
+ location /_AMapService/v3/vectormap {
+ set $args "$args&jscode=${var.lbs_amap_secret}";
+ proxy_pass https://fmap01.amap.com/v3/vectormap;
+ }
+
+ location /_AMapService/ {
+ set $args "$args&jscode=${var.lbs_amap_secret}";
+ proxy_pass https://restapi.amap.com/;
+ }
+ %{else}
+ #
+ %{endif~}
+
+ EOT
+
+ #Robots
+ "disable_robots.txt" = <<-EOT
+ User-agent: *
+ Disallow: /
+ EOT
+ "enable_robots.txt" = <<-EOT
+ User-agent: *
+ Disallow: /wp-admin/
+ Allow: /wp-admin/admin-ajax.php
+ EOT
+
+ "static-proxy.conf" = <<-EOT
+ %{if var.developers_redirect_url != ""~}
+ location ^~ /developers {
+ rewrite ^/developers/?(.*)$ ${var.developers_redirect_url}/$1 permanent;
+ }
+ %{endif~}
+ location /pricing {
+ proxy_pass ${var.pricing_host};
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $connection_upgrade;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ }
+ # pricing nextjs static resources
+ location /pricing/_next {
+ proxy_pass ${var.pricing_host}/_next;
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ add_header 'Cache-Control' 'public, max-age=2592000';
+ proxy_redirect off;
+ proxy_cache cache_one;
+ proxy_cache_valid 200 302 1h;
+ proxy_cache_valid 301 1d;
+ proxy_cache_valid any 1m;
+ expires 7d;
+ }
+ EOT
+
+ "imageproxy-server.conf" = <<-EOT
+ location /${var.public_assets_bucket} {
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ # enable cache
+ proxy_cache cache_one;
+ proxy_cache_valid 200 302 3h;
+ proxy_cache_valid any 1m;
+
+ proxy_connect_timeout 300;
+ chunked_transfer_encoding off;
+ proxy_pass http://imageproxy-server/image/${var.public_assets_bucket}/;
+ if ( $args !~* ^imageView ){
+ proxy_pass ${var.minio_host};
+ }
+
+ if ( $request_method = PUT ){
+ proxy_pass ${var.minio_host};
+ }
+
+ }
+ location ~* ^/(minio) {
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ proxy_connect_timeout 300;
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ chunked_transfer_encoding off;
+ proxy_pass ${var.minio_host};
+ }
+
+
+ location /oss {
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme;
+
+ proxy_connect_timeout 300;
+ proxy_http_version 1.1;
+ proxy_set_header Connection "";
+ chunked_transfer_encoding off;
+ proxy_pass ${var.minio_host}/${var.public_assets_bucket}/;
+ }
+
+ location =/${var.public_assets_bucket} {
+ return 403;
+ }
+ EOT
+
+ # Empty file used as a placeholder when an optional config is disabled
+ "blank.config" = ""
+
+ "openresty_extra_config.conf" = var.openresty_extra_config
+
+ }
+}
+
diff --git a/modules/app/openresty.secret.tf b/modules/app/openresty.secret.tf
new file mode 100644
index 0000000..9ca00d7
--- /dev/null
+++ b/modules/app/openresty.secret.tf
@@ -0,0 +1,39 @@
+resource "kubernetes_secret" "openresty-ssl-certs" {
+ depends_on = [kubernetes_namespace.this]
+ metadata {
+ name = "openresty-ssl-certs"
+ namespace = var.namespace
+ annotations = {
+ "domain" = var.server_name
+ }
+ }
+
+ data = {
+ "tls.crt" = var.tls_crt
+ "tls.key" = var.tls_key
+ }
+
+ type = "IngressTLS"
+}
+
+#extend certs for openresty
+resource "kubernetes_secret" "openresty-extend-ssl-certs" {
+ count = var.has_extend_tls ? 1 : 0
+ depends_on = [kubernetes_namespace.this]
+ metadata {
+ name = "openresty-extend-ssl-certs"
+ namespace = var.namespace
+ annotations = {
+ "domain" = var.extend_tls_data.tls_domain
+ }
+ }
+
+
+ data = {
+ "tls.crt" = var.extend_tls_data.tls_crt
+ "tls.key" = var.extend_tls_data.tls_key
+ }
+
+ type = "IngressTLS"
+}
+
diff --git a/modules/app/openresty.svc.tf b/modules/app/openresty.svc.tf
new file mode 100644
index 0000000..a6d71ae
--- /dev/null
+++ b/modules/app/openresty.svc.tf
@@ -0,0 +1,50 @@
+
+resource "kubernetes_service" "openresty_server" {
+ count = var.has_openresty ? 1 : 0
+
+ lifecycle {
+ ignore_changes = [
+ metadata[0].labels,
+ spec[0].port[0],
+ spec[0].port[1]
+ ]
+ }
+
+ metadata {
+ name = "openresty-server"
+ namespace = var.namespace
+
+ labels = {
+ # "service.beta.kubernetes.io/hash" = "d233bf00296726abd8c8fd741e86521efe44c7278d05368fdb56acb6"
+ }
+
+ annotations = var.openresty_annotations
+
+ # finalizers = ["service.k8s.alibaba/resources"]
+ }
+
+ spec {
+ port {
+ name = "http-80"
+ protocol = "TCP"
+ port = 80
+ target_port = "80"
+ }
+
+ port {
+ name = "http-443"
+ protocol = "TCP"
+ port = 443
+ target_port = "443"
+ }
+
+ selector = {
+ app = "openresty"
+ }
+
+ type = var.has_load_balancer ? "LoadBalancer" : "NodePort"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ external_traffic_policy = var.has_load_balancer ? "Local" : null
+ }
+}
diff --git a/modules/app/openresty.vars.tf b/modules/app/openresty.vars.tf
new file mode 100644
index 0000000..3ae80a2
--- /dev/null
+++ b/modules/app/openresty.vars.tf
@@ -0,0 +1,121 @@
+variable "job_admin_server_host" {
+ type = string
+ default = "job-admin-server"
+}
+
+
+variable "disallow_robots" {
+ type = bool
+ default = true
+ description = "Whether to disable crawlers"
+}
+
+variable "default_server_host" {
+ type = string
+ default = "http://web-server"
+ description = "Default route processing service"
+}
+
+variable "lbs_amap_secret" {
+ type = string
+ default = ""
+ description = "Gaode map reverse proxy security key"
+}
+
+variable "minio_host" {
+ type = string
+ default = "http://minio.apitable-datacenter:9090"
+ description = "Object storage service address"
+}
+
+variable "server_name" {
+ type = string
+ default = "vika.ltd"
+ description = "default domain name"
+}
+
+variable "enable_ssl" {
+ type = bool
+ default = false
+ description = "Whether to enable ssl"
+}
+
+variable "tls_name" {
+ type = string
+ default = ""
+ description = "tls cert name"
+}
+
+variable "tls_crt" {
+ type = string
+ default = ""
+ description = "tls cert body"
+}
+
+variable "tls_key" {
+ type = string
+ default = ""
+ description = "tls key body"
+}
+
+
+
+variable "openresty_annotations" {
+ type = map(any)
+ description = "openresty annotation, used to control load balancing specifications, slb.s1.small(5k), slb.s3.small(20w) / slb.s3.large(100w)"
+ default = {
+ "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-spec" = "slb.s1.small"
+ }
+}
+
+variable "openresty_extra_config" {
+ type = string
+ default = ""
+ description = "nginx (openresty) external configuration file, which belongs to http internal level"
+}
+
+variable "openresty_server_block" {
+ type = string
+ default = ""
+ description = "nginx (openresty) external configuration file, which belongs to the internal configuration of service"
+}
+
+variable "worker_processes" {
+ default = "auto"
+ description = "nginx(openresty) worker_processes CPU core number, the corresponding CPU core number in the old version k8s"
+}
+
+
+variable "pricing_host" {
+ type = string
+ default = "http://pricing.apitable-mkt"
+ description = "pricing server"
+}
+
+variable "openresty_index_block" {
+ type = string
+ default = ""
+ description = "Homepage URI =/, support nginx, lua code block"
+}
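+# A hedged sketch of an openresty_index_block value; the cookie name and the
+# target page are hypothetical:
+#
+#   openresty_index_block = <<-EOT
+#     if ($cookie_beta = "1") {
+#       rewrite ^/$ /beta.html break;
+#     }
+#   EOT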
+
+variable "developers_redirect_url" {
+ type = string
+ default = ""
+}
+
+
+variable "has_extend_tls" {
+ description = "Whether to support extended certificate"
+ type = bool
+ default = false
+}
+
+variable "extend_tls_data" {
+ description = "Extended certificate crt and key contents"
+ type = map(any)
+ default = {
+ tls_crt = ""
+ tls_key = ""
+ tls_domain = ""
+ }
+}
\ No newline at end of file
diff --git a/modules/app/outputs.tf b/modules/app/outputs.tf
new file mode 100644
index 0000000..9f87735
--- /dev/null
+++ b/modules/app/outputs.tf
@@ -0,0 +1,9 @@
+
+#Output the ingress IP
+output "ingress_ip" {
+ value = var.has_load_balancer ? kubernetes_service.openresty_server[0].status[0].load_balancer[0].ingress[0].ip : ""
+}
+
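+# Some load balancers report more than one ingress IP; try() returns "" when a
+# second entry is absent instead of failing the plan.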
+output "ingress_ip_alternative" {
+ value = var.has_load_balancer ? try(kubernetes_service.openresty_server[0].status[0].load_balancer[0].ingress[1].ip, "") : ""
+}
diff --git a/modules/app/room_server.tf b/modules/app/room_server.tf
new file mode 100644
index 0000000..776dba5
--- /dev/null
+++ b/modules/app/room_server.tf
@@ -0,0 +1,233 @@
+locals {
+ default_room_server_resources = {
+ replicas = "2"
+ max_replicas = "20"
+ requests_cpu = "100m"
+ requests_memory = "512Mi"
+ limits_cpu = "1500m"
+ limits_memory = "8192Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ cpu_utilization_percentage = "500"
+ }
+}
+
+locals {
+ room_server_resources = merge(local.default_room_server_resources, lookup(var.resources, "room_server", {}))
+}
+
+resource "kubernetes_deployment" "room_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_room_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_deployment.backend_server,
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "room-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "room-server"
+ }
+
+ }
+
+ spec {
+ replicas = local.room_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "room-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "room-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.room-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.room_server_env.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "room-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "room-server-env"
+ }
+ }
+
+ resources {
+ requests = {
+ cpu = local.room_server_resources["requests_cpu"]
+ memory = local.room_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.room_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.room_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 45
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ startup_probe {
+ http_get {
+ path = "/actuator/health"
+ port = "3333"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ volume_mount {
+ name = "room-server-1658224800000"
+ mount_path = "/home/vikadata/packages/room-server/logs"
+ }
+
+ volume_mount {
+ name = "room-server-1658224800000"
+ mount_path = "/app/packages/room-server/logs"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ volume {
+ name = "room-server-1658224800000"
+ empty_dir {}
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.room_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.room_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes injected outside Terraform so they are not reported as drift.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "room_server" {
+ count = var.has_room_server ? 1 : 0
+ metadata {
+ name = "room-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "room-server-3333-3333"
+ protocol = "TCP"
+ port = 3333
+ target_port = "3333"
+ }
+
+ port {
+ name = "room-server-3334-3334"
+ protocol = "TCP"
+ port = 3334
+ target_port = "3334"
+ }
+
+ port {
+ name = "room-server-3006-3006"
+ protocol = "TCP"
+ port = 3006
+ target_port = "3006"
+ }
+
+ selector = {
+ app = "room-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
+
+resource "kubernetes_horizontal_pod_autoscaler" "room_server_autoscaler" {
+ count = var.has_room_server ? 1 : 0
+ metadata {
+ name = "room-server-autoscaler-v2beta2"
+ namespace = var.namespace
+ }
+
+ spec {
+ min_replicas = local.room_server_resources["replicas"]
+ max_replicas = local.room_server_resources["max_replicas"]
+
+ scale_target_ref {
+ api_version = "apps/v1"
+ kind = "Deployment"
+ name = "room-server"
+ }
+ target_cpu_utilization_percentage = local.room_server_resources["cpu_utilization_percentage"]
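+ # The 500% target is measured against the container CPU request (100m by
+ # default), so scale-out starts once average usage passes roughly 500m per pod.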
+ }
+}
diff --git a/modules/app/room_server_env.tf b/modules/app/room_server_env.tf
new file mode 100644
index 0000000..b7584e9
--- /dev/null
+++ b/modules/app/room_server_env.tf
@@ -0,0 +1,31 @@
+locals {
+ room_server_env = merge(local.env_config, {
+ WEB_SOCKET_CHANNEL_ENV = var.namespace
+ APPLICATION_NAME = "ROOM_SERVER"
+ SERVER_PORT = "3333"
+ INSTANCE_COUNT = "1"
+ LOG_LEVEL = "info"
+ BACKEND_BASE_URL = "http://backend-server:8081/api/v1/"
+ SOCKET_GRPC_URL = "socket-server:3007"
+ OSS_HOST = "/assets"
+ OSS_TYPE = "QNY1"
+ ZIPKIN_ENABLED = "false"
+ ROBOT_OFFICIAL_SERVICE_SLUG = "vika"
+ DEFAULT_LANGUAGE = "en-US"
+ }, lookup(var.envs, "room_server", {}))
+}
+
+resource "kubernetes_config_map" "room_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "room-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.room_server_env
+}
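+# A sketch of overriding one of these values for a single environment via
+# var.envs (hypothetical value):
+#
+#   envs = {
+#     room_server = {
+#       LOG_LEVEL = "debug"
+#     }
+#   }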
diff --git a/modules/app/scheduler_server.app.tf b/modules/app/scheduler_server.app.tf
new file mode 100644
index 0000000..a7ed60c
--- /dev/null
+++ b/modules/app/scheduler_server.app.tf
@@ -0,0 +1,130 @@
+locals {
+ default_scheduler_server_resources = {
+ replicas = "1"
+ requests_cpu = "100m"
+ requests_memory = "1024Mi"
+ limits_cpu = "1500m"
+ limits_memory = "4096Mi"
+ rolling_update_max_unavailable = "0%"
+ rolling_update_max_surge = "25%"
+ }
+}
+
+locals {
+ scheduler_server_resources = merge(local.default_scheduler_server_resources, lookup(var.resources, "scheduler_server", {}))
+}
+
+resource "kubernetes_deployment" "scheduler_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_scheduler_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "scheduler-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "scheduler-server"
+ }
+
+ }
+
+ spec {
+ replicas = local.scheduler_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "scheduler-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "scheduler-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.scheduler-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.scheduler_server_env.data)) : "not_enabled"
+
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "scheduler-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "room_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "scheduler-server-env"
+ }
+ }
+
+ resources {
+ requests = {
+ cpu = local.scheduler_server_resources["requests_cpu"]
+ memory = local.scheduler_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.scheduler_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.scheduler_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ volume_mount {
+ name = "scheduler-server-1658224800000"
+ mount_path = "/home/vikadata/packages/room-server/logs"
+ }
+
+ volume_mount {
+ name = "scheduler-server-1658224800000"
+ mount_path = "/app/packages/room-server/logs"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ volume {
+ name = "scheduler-server-1658224800000"
+ empty_dir {}
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.scheduler_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.scheduler_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes injected outside Terraform so they are not reported as drift.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
diff --git a/modules/app/scheduler_server.svc.tf b/modules/app/scheduler_server.svc.tf
new file mode 100644
index 0000000..fcdfce3
--- /dev/null
+++ b/modules/app/scheduler_server.svc.tf
@@ -0,0 +1,30 @@
+
+resource "kubernetes_service" "scheduler_server" {
+ count = var.has_scheduler_server ? 1 : 0
+
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+
+ metadata {
+ name = "scheduler-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "scheduler-server-3333-3333"
+ protocol = "TCP"
+ port = 3333
+ target_port = "3333"
+ }
+
+ selector = {
+ app = "scheduler-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
diff --git a/modules/app/scheduler_server_env.tf b/modules/app/scheduler_server_env.tf
new file mode 100644
index 0000000..6bae39a
--- /dev/null
+++ b/modules/app/scheduler_server_env.tf
@@ -0,0 +1,27 @@
+locals {
+ scheduler_server_env = merge(local.env_config, {
+ ENABLE_SCHED = "true"
+ APPLICATION_NAME = "SCHEDULE_SERVER"
+ INSTANCE_COUNT = "1"
+ LOG_LEVEL = "info"
+ BACKEND_BASE_URL = "http://backend-server:8081/api/v1/"
+ SOCKET_GRPC_URL = "socket-server:3007"
+ OSS_TYPE = "QNY1"
+ ZIPKIN_ENABLED = "false"
+ }, lookup(var.envs, "scheduler_server", {}))
+}
+
+resource "kubernetes_config_map" "scheduler_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "scheduler-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.scheduler_server_env
+}
diff --git a/modules/app/socket_server.tf b/modules/app/socket_server.tf
new file mode 100644
index 0000000..4c279fb
--- /dev/null
+++ b/modules/app/socket_server.tf
@@ -0,0 +1,218 @@
+locals {
+ default_socket_server_resources = {
+ replicas = "1"
+ requests_cpu = "100m"
+ requests_memory = "512Mi"
+ limits_cpu = "1000m"
+ limits_memory = "2048Mi"
+ rolling_update_max_unavailable = "25%"
+ rolling_update_max_surge = "25%"
+ }
+}
+
+locals {
+ socket_server_resources = merge(local.default_socket_server_resources, lookup(var.resources, "socket_server", {}))
+}
+
+resource "kubernetes_deployment" "socket_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_socket_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "socket-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "socket-server"
+ }
+
+ }
+
+ spec {
+ replicas = local.socket_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "socket-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "socket-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.socket-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.socket_server_env.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ container {
+ name = "socket-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "room_server")}/room-server:${lookup(local.image_tags, "socket_server")}"
+ image_pull_policy = var.image_pull_policy
+
+ env_from {
+ config_map_ref {
+ name = "socket-server-env"
+ }
+ }
+
+ resources {
+ requests = {
+ cpu = local.socket_server_resources["requests_cpu"]
+ memory = local.socket_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.socket_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.socket_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ liveness_probe {
+ http_get {
+ path = "/socket/health"
+ port = "3001"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 30
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ readiness_probe {
+ http_get {
+ path = "/socket/health"
+ port = "3001"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ startup_probe {
+ http_get {
+ path = "/socket/health"
+ port = "3001"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ volume_mount {
+ name = "socket-server-1658224800000"
+ mount_path = "/app/packages/socket-server/logs"
+ }
+
+ volume_mount {
+ name = "socket-server-1658224800000"
+ mount_path = "/app/packages/room-server/logs"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ }
+
+ volume {
+ name = "socket-server-1658224800000"
+ empty_dir {}
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.socket_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.socket_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+
+ //Ignore affinity changes injected outside Terraform so they are not reported as drift.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
+
+resource "kubernetes_service" "socket_server" {
+ count = var.has_socket_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "socket-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ name = "socket-server-3001-3001"
+ protocol = "TCP"
+ port = 3001
+ target_port = "3001"
+ }
+
+ port {
+ name = "socket-server-3002-3002"
+ protocol = "TCP"
+ port = 3002
+ target_port = "3002"
+ }
+
+ port {
+ name = "socket-server-3005-3005"
+ protocol = "TCP"
+ port = 3005
+ target_port = "3005"
+ }
+
+ port {
+ name = "socket-server-3007-3007"
+ protocol = "TCP"
+ port = 3007
+ target_port = "3007"
+ }
+
+ selector = {
+ app = "socket-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
diff --git a/modules/app/socket_server_env.tf b/modules/app/socket_server_env.tf
new file mode 100644
index 0000000..4a0f3e5
--- /dev/null
+++ b/modules/app/socket_server_env.tf
@@ -0,0 +1,38 @@
+locals {
+ socket_server_env = merge(local.env_config, {
+ WEB_SOCKET_CHANNEL_ENV = var.namespace
+ APPLICATION_NAME = "SOCKET_SERVER"
+ ENABLE_SOCKET = "true"
+ SERVER_PORT = "3001"
+
+ INSTANCE_COUNT = "1"
+ LOG_LEVEL = "info"
+
+ NEST_HEALTH_CHECK_CRON_EXPRESSION = "*/3 * * * * *"
+ NEST_HEALTH_CHECK_TIMEOUT = "1000"
+ #Deprecated: NEST_GRPC_URL => ROOM_GRPC_URL
+ NEST_GRPC_URL = "room-server:3334"
+ ROOM_GRPC_URL = "room-server:3334"
+ #Deprecated: BACKEND_GRPC_URL => SOCKET_BACKEND_GRPC_URL
+ BACKEND_GRPC_URL = "backend-server:8083"
+ SOCKET_BACKEND_GRPC_URL = "backend-server:8083"
+ GRPC_TIMEOUT_MAX_TIMES = "3"
+ NODE_MEMORY_RATIO = "80"
+
+ }, lookup(var.envs, "socket_server", {}))
+}
+
+resource "kubernetes_config_map" "socket_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "socket-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.socket_server_env
+}
diff --git a/modules/app/variables.tf b/modules/app/variables.tf
new file mode 100644
index 0000000..0bf3c75
--- /dev/null
+++ b/modules/app/variables.tf
@@ -0,0 +1,252 @@
+variable "namespace" {
+ type = string
+ default = "apitable-app"
+ description = "Shared namespace for applications"
+}
+
+variable "create_ns" {
+ default = true
+ type = bool
+ description = "Whether to automatically create namespace"
+}
+
+variable "has_load_balancer" {
+ description = "Does it come with Load Balancer? OpenResty exposes IP if any"
+ type = bool
+ default = false
+}
+
+
+variable "has_cron_job" {
+ description = "Whether it has a timed task job?"
+ type = bool
+ default = true
+}
+
+variable "has_openresty" {
+ description = "Does it come with an openresty gateway? With public IP and load balancing"
+ type = bool
+ default = true
+}
+
+variable "has_openresty_ofelia_job" {
+ description = "whether to bring a lightweight OfeliaJob Container?"
+ type = bool
+ default = false
+}
+
+variable "has_backend_server" {
+ description = "Whether to deploy Java-Api service?"
+ type = bool
+ default = true
+}
+
+variable "has_sensors_filebeat" {
+ default = true
+ type = bool
+ description = "Whether to enable Sensors data collection"
+}
+
+variable "has_job_admin_server" {
+ description = "Whether to deploy XXL-JOB-Admin service?"
+ type = bool
+ default = false
+}
+
+variable "has_space_job_executor" {
+ description = "Whether to deploy XXL-JO-workbench task executor service?"
+ type = bool
+ default = false
+}
+
+variable "has_bill_job_executor" {
+ description = "Whether to deploy XXL-JOB-subscription task executor service?"
+ type = bool
+ default = false
+}
+
+variable "has_room_server" {
+ description = "Whether to deploy the Node-Nest.js-Room-Server service?"
+ type = bool
+ default = true
+}
+
+variable "has_nest_rest_server" {
+ description = "/dataPack API only, would be removed after publishing Galaxy version"
+ type = bool
+ default = false
+}
+
+variable "has_fusion_server" {
+ description = "Whether to deploy the Node-Nest.js-Fusion-Api-Server service?"
+ type = bool
+ default = true
+}
+
+variable "has_scheduler_server" {
+ description = "Whether to deploy the Node-Nest.js-Scheduler-Server service?"
+ type = bool
+ default = true
+}
+
+variable "has_socket_server" {
+ description = "Whether to deploy the Node-Nest.js-Socket-Server service?"
+ type = bool
+ default = true
+}
+
+variable "has_document_server" {
+ description = "Whether to deploy the Node-Nest.js-Document-Server service?"
+ type = bool
+ default = false
+}
+
+variable "has_web_server" {
+ description = "Whether to deploy Web-Server (front-end template) service?"
+ type = bool
+ default = true
+}
+
+variable "has_migration_server" {
+ description = "Whether to deploy Java-Data Migration Service?"
+ type = bool
+ default = false
+}
+
+variable "has_imageproxy_server" {
+ description = "Whether to deploy the Go image clipping service?"
+ type = bool
+ default = false
+}
+
+variable "has_dingtalk_server" {
+ description = "Whether to deploy DingTalk application integration service?"
+ type = bool
+ default = false
+}
+
+variable "has_ai_server" {
+ description = "Whether to deploy AI server?"
+ type = bool
+ default = false
+}
+
+variable "has_databus_server" {
+ description = "Deploy the databus-server?"
+ type = bool
+ default = true
+}
+
+variable "publish_databus_server" {
+ description = "Publish the databus-server ?"
+ type = bool
+ default = true
+}
+
+variable "has_auto_reloaded_config_map" {
+ description = "Modify the configMap whether to automatically restart pods?"
+ type = bool
+ default = false
+}
+
+variable "registry" {
+ description = "The dockerHub, the default is ghcr.io of github, the Vika accelerator is ghcr.vikadata.com, and the private warehouse is docker.vika.ltd"
+ type = string
+ default = "ghcr.io"
+}
+
+variable "image_tag" {
+ description = "What version of the container image tag to use when initializing"
+ type = string
+ default = "latest-alpha"
+}
+
+variable "image_tags" {
+ description = "During initialization, you can freely control different container services, which tags are used respectively, and if any, overwrite image_tag. It is recommended that convention is better than configuration. Make corresponding branches in each project, and use the last image_tag variable for global unification instead of configuring here. In addition, it should be noted that the variables here are all underscored, such as the container service backend-server, the variables here correspond to backend_server, and match the terraform variable naming practice"
+ type = map(any)
+ default = {}
+}
+
+variable "image_namespace" {
+ description = "What namespace container image to use when initializing"
+ type = string
+ default = "vikadata/vika"
+}
+
+variable "image_namespaces" {
+ description = "During initialization, you can freely control different container services, which namespaces are used respectively, and if any, overwrite image_namespace. It is recommended that convention is better than configuration, and corresponding branches should be made in each project"
+ type = map(any)
+ default = {}
+}
+
+variable "env" {
+ description = "environment variable"
+ type = map(any)
+ default = {}
+}
+
+variable "envs" {
+ description = "Environment variables, submodules replace .env"
+ #type = map(any)
+ default = {
+ }
+}
+
+variable "resources" {
+ description = "How many resources are used for different services? Including copy, CPU, and memory, the unit is MB. limit is the modified value × 2, and each environment has the default value of the minimum unit to start"
+ type = any
+ default = {
+ }
+}
+
+variable "image_pull_policy" {
+ type = string
+ default = "IfNotPresent"
+}
+
+variable "is_wait" {
+ type = bool
+ default = true
+}
+
+variable "public_assets_bucket" {
+ type = string
+ default = "vk-assets-ltd"
+}
+
+variable "default_server_host_override_proxy_host" {
+ type = string
+ default = ""
+}
+
+## Deprecated
+variable "docker_edition" {
+ type = string
+ default = "vika"
+}
+
+variable "node_selector" {
+ default = {
+ }
+ description = "Node node label selector"
+}
+
+variable "ai_server_sc" {
+ default = {
+ size = "10Pi"
+ volume_attributes = {
+ subPath = "ai_server"
+ }
+ }
+ description = "ai_server storage class"
+}
+
+variable "pv_csi" {
+ default = {
+ namespace = "vika-opsbase"
+ driver = "csi.juicefs.com"
+ fs_type = "juicefs"
+ node_publish_secret_ref = "juicefs-sc-secret"
+ }
+ description = "csi storage namespace"
+}
\ No newline at end of file
diff --git a/modules/app/versions.tf b/modules/app/versions.tf
new file mode 100644
index 0000000..e8f6db1
--- /dev/null
+++ b/modules/app/versions.tf
@@ -0,0 +1,24 @@
+terraform {
+ required_version = ">= 1.0"
+
+ required_providers {
+ kubernetes = {
+ source = "hashicorp/kubernetes"
+ version = ">= 2.16.1"
+ }
+ }
+}
+
+resource "kubernetes_namespace" "this" {
+ count = var.create_ns ? 1 : 0
+ metadata {
+ name = var.namespace
+ }
+}
+
+data "kubernetes_namespace" "this" {
+ count = var.create_ns ? 0 : 1
+ metadata {
+ name = var.namespace
+ }
+}
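+# With create_ns = true the namespace is created and managed here; with
+# create_ns = false an existing namespace is read via the data source instead,
+# so the module can target a namespace owned elsewhere.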
diff --git a/modules/app/web_server.app.tf b/modules/app/web_server.app.tf
new file mode 100644
index 0000000..76b62e8
--- /dev/null
+++ b/modules/app/web_server.app.tf
@@ -0,0 +1,177 @@
+resource "kubernetes_deployment" "web_server" {
+ wait_for_rollout = var.is_wait
+ count = var.has_web_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "web-server"
+ namespace = var.namespace
+
+ labels = {
+ app = "web-server"
+ }
+
+ }
+
+ spec {
+ replicas = local.web_server_resources["replicas"]
+
+ selector {
+ match_labels = {
+ app = "web-server"
+ }
+ }
+
+ template {
+ metadata {
+ labels = {
+ app = "web-server"
+ }
+
+ annotations = {
+ # redeploy-timestamp = "1655781881366"
+ "configmap.web-server-env/reload" = var.has_auto_reloaded_config_map ? sha1(jsonencode(kubernetes_config_map.web_server_env.data)) : "not_enabled"
+ }
+ }
+
+ spec {
+
+ node_selector = var.node_selector
+
+ init_container {
+ name = "init-settings"
+ image = "${var.registry}/${lookup(local.image_namespaces, "init_settings")}/init-settings:${lookup(local.image_tags, "init_settings")}"
+ image_pull_policy = var.image_pull_policy
+ command = [
+ "sh", "-c",
+ "[ -d /settings ] && cp -afr /settings/* /tmp/vika"
+ ]
+ security_context {
+ allow_privilege_escalation = false
+ run_as_user = "0"
+ }
+ volume_mount {
+ mount_path = "/tmp/vika"
+ name = "settings"
+ sub_path = "settings"
+ }
+ }
+
+ volume {
+ name = "settings"
+ empty_dir {}
+ }
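+ # The init container above copies bundled settings into this shared
+ # emptyDir; the web-server container mounts the same volume at /tmp/vika.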
+
+ container {
+ name = "web-server"
+ image = "${var.registry}/${lookup(local.image_namespaces, "web_server")}/web-server:${lookup(local.image_tags, "web_server")}"
+
+ env_from {
+ config_map_ref {
+ name = "web-server-env"
+ }
+ }
+
+ volume_mount {
+ mount_path = "/tmp/vika"
+ name = "settings"
+ sub_path = "settings"
+ }
+
+ resources {
+ requests = {
+ cpu = local.web_server_resources["requests_cpu"] //@add_tf_local
+ memory = local.web_server_resources["requests_memory"] //@add_tf_local
+ }
+ limits = {
+ cpu = local.web_server_resources["limits_cpu"] //@add_tf_local
+ memory = local.web_server_resources["limits_memory"] //@add_tf_local
+ }
+ }
+
+ # Detect whether the application is healthy, delete and restart the container if it is unhealthy
+ liveness_probe {
+ http_get {
+ path = "/api/actuator/health"
+ port = "8080"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 45
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ # Detect whether the application is ready and in normal service state, if it is not normal, it will not receive traffic from Kubernetes Service
+ readiness_probe {
+ http_get {
+ path = "/api/actuator/health"
+ port = "8080"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 3
+ period_seconds = 15
+ success_threshold = 1
+ failure_threshold = 3
+ }
+
+ # Detect whether the application is started. If it is not ready within the failureThreshold*periodSeconds period, the application process will be restarted
+ startup_probe {
+ http_get {
+ path = "/api/actuator/health"
+ port = "8080"
+ scheme = "HTTP"
+ }
+
+ initial_delay_seconds = 10
+ timeout_seconds = 1
+ period_seconds = 10
+ success_threshold = 1
+ failure_threshold = 3
+ }
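+
+ # With the values above that window is 10 + 3 * 10 = 40 seconds (initial
+ # delay plus failure_threshold * period_seconds) before the kubelet
+ # restarts the container.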
+
+ command = local.web_server_resources["command"]
+
+ security_context {
+ allow_privilege_escalation = false
+ run_as_user = "0"
+ }
+
+ termination_message_path = "/dev/termination-log"
+ termination_message_policy = "File"
+ image_pull_policy = var.image_pull_policy
+ }
+
+ image_pull_secrets {
+ name = "regcred"
+ }
+ restart_policy = "Always"
+ termination_grace_period_seconds = 30
+ dns_policy = "ClusterFirst"
+ }
+ }
+
+ strategy {
+ type = "RollingUpdate"
+
+ rolling_update {
+ max_unavailable = local.web_server_resources["rolling_update_max_unavailable"]
+ max_surge = local.web_server_resources["rolling_update_max_surge"]
+ }
+ }
+
+ revision_history_limit = 10
+ progress_deadline_seconds = 600
+ }
+ // Ignore affinity changes made outside Terraform so plans do not revert them.
+ lifecycle {
+ ignore_changes = [
+ spec[0].template[0].spec[0].affinity,
+ ]
+ }
+}
diff --git a/modules/app/web_server.env.tf b/modules/app/web_server.env.tf
new file mode 100644
index 0000000..89b996c
--- /dev/null
+++ b/modules/app/web_server.env.tf
@@ -0,0 +1,61 @@
+locals {
+ web_server_env = merge(local.env_config, {
+
+ API_PROXY = "http://backend-server:8081"
+ BACKEND_INFO_URL = "http://backend-server:8081/api/v1/client/info"
+ TEMPLATE_PATH = "./static/web_build/index.html"
+ WEB_SERVER_PORT = "8080"
+
+
+ }, lookup(var.envs, "web_server", {}))
+}
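+
+# Example (illustrative): any default above can be replaced per environment by
+# passing, e.g.
+#   envs = { web_server = { WEB_SERVER_PORT = "9090" } }
+# to the module; merge() gives the caller-supplied map the last word.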
+
+resource "kubernetes_config_map" "web_server_env" {
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "web-server-env"
+ namespace = var.namespace
+
+ annotations = {
+ }
+ }
+
+ data = local.web_server_env
+}
+
+// Declare the minimum resources required to start the service.
+locals {
+ default_web_server_resources = {
+ replicas = "1"
+ // ⚠️ Note: requests are the resources guaranteed to the container; limits
+ // are the hard caps. CPU units: 200m = 0.2 CPU, 1000m = 1 CPU, 0.1 = 100m.
+ // see: https://kubesphere.io/zh/blogs/deep-dive-into-the-k8s-request-and-limit
+ requests_cpu = "200m"
+ // Memory units: Mi, Gi
+ requests_memory = "512Mi"
+ limits_cpu = "1000m"
+ limits_memory = "2048Mi"
+ rolling_update_max_unavailable = "0%"
+ rolling_update_max_surge = "25%"
+ // Override the container command: run the injected settings script (if present), then start the server.
+ command = [
+ "/bin/sh", "-c",
+ "[ -f /tmp/vika/run.sh ] && sh /tmp/vika/run.sh ; node server.js"
+ ]
+ }
+}
+// Merge resource parameters declared for the individual namespace (environment).
+locals {
+ web_server_resources = merge(local.default_web_server_resources, lookup(var.resources, "web_server", {}))
+}
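+
+# Example (illustrative): scale one environment up by passing, e.g.
+#   resources = { web_server = { replicas = "3", limits_memory = "4096Mi" } }
+# Only the listed keys are overridden; the defaults above fill in the rest.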
diff --git a/modules/app/web_server.svc.tf b/modules/app/web_server.svc.tf
new file mode 100644
index 0000000..479561b
--- /dev/null
+++ b/modules/app/web_server.svc.tf
@@ -0,0 +1,31 @@
+
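+# In-cluster clients reach this Deployment at web-server.<namespace>:8080 via
+# the standard Service DNS name; the API_PROXY default in web_server.env.tf
+# uses the analogous short name for backend-server in the same namespace.
+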
+resource "kubernetes_service" "web_server" {
+ count = var.has_web_server ? 1 : 0
+ depends_on = [
+ kubernetes_namespace.this
+ ]
+ metadata {
+ name = "web-server"
+ namespace = var.namespace
+ }
+
+ spec {
+ port {
+ protocol = "TCP"
+ port = 8080
+ target_port = "8080"
+ }
+
+ selector = {
+ app = "web-server"
+ }
+
+ type = "ClusterIP"
+ session_affinity = "None"
+ ip_families = ["IPv4"]
+ }
+}
\ No newline at end of file
diff --git a/modules/datacenter/README.md b/modules/datacenter/README.md
new file mode 100644
index 0000000..336480d
--- /dev/null
+++ b/modules/datacenter/README.md
@@ -0,0 +1,78 @@
+
+
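+## Usage
+
+A minimal sketch with illustrative values (the `source` path is an assumption;
+adjust it to where this repository is checked out):
+
+```hcl
+module "datacenter" {
+  source                      = "./modules/datacenter"
+  namespace                   = "apitable-datacenter"
+  has_mysql                   = true
+  mysql_default_root_password = "change-me"
+}
+```
+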
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 1.0 |
+| [helm](#requirement\_helm) | >= 2.8.0 |
+| [kubernetes](#requirement\_kubernetes) | >= 2.16.1 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [helm](#provider\_helm) | >= 2.8.0 |
+| [kubernetes](#provider\_kubernetes) | >= 2.16.1 |
+
+## Modules
+
+No modules.
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [helm_release.minio](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.mysql](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.rabbitmq](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [helm_release.redis](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/namespace) | resource |
+| [kubernetes_namespace.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/data-sources/namespace) | data source |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| [chart\_registry](#input\_chart\_registry) | Default registry for chart container images | `string` | `"docker.io"` | no |
+| [chart\_registrys](#input\_chart\_registrys) | Per-service registry overrides; any entry here replaces the default registry for that service's chart. Convention over configuration is recommended. | `map` | `{}` | no |
+| [chart\_repository](#input\_chart\_repository) | Default Helm chart repository URL | `string` | `"https://charts.bitnami.com/bitnami"` | no |
+| [chart\_repositorys](#input\_chart\_repositorys) | Per-service chart repository overrides; any entry here replaces the default repository for that service's chart. Convention over configuration is recommended. | `map` | `{}` | no |
+| [create\_ns](#input\_create\_ns) | Whether to automatically create namespace | `bool` | `true` | no |
+| [default\_storage\_class\_name](#input\_default\_storage\_class\_name) | StorageClass name used when auto-generating PVC claims; the default targets an Alibaba Cloud efficiency disk | `string` | `"alicloud-disk-efficiency"` | no |
+| [default\_storage\_size](#input\_default\_storage\_size) | Size of the auto-generated storage volumes; 20Gi is the Alibaba Cloud minimum | `string` | `"20Gi"` | no |
+| [has\_filebeat](#input\_has\_filebeat) | n/a | `bool` | `false` | no |
+| [has\_minio](#input\_has\_minio) | n/a | `bool` | `false` | no |
+| [has\_mysql](#input\_has\_mysql) | n/a | `bool` | `true` | no |
+| [has\_rabbitmq](#input\_has\_rabbitmq) | n/a | `bool` | `false` | no |
+| [has\_redis](#input\_has\_redis) | n/a | `bool` | `false` | no |
+| [minio\_default\_password](#input\_minio\_default\_password) | n/a | `string` | `"73VyYWygp7VakhRC6hTf"` | no |
+| [minio\_resources](#input\_minio\_resources) | resource configuration | `map` | `{ "limits": {}, "requests": {} }` | no |
+| [mysql\_default\_root\_password](#input\_mysql\_default\_root\_password) | n/a | `string` | `"6sg8vgDFcwWXP386EiZB"` | no |
+| [mysql\_init\_database](#input\_mysql\_init\_database) | n/a | `string` | `"apitable"` | no |
+| [mysql\_init\_disk\_size](#input\_mysql\_init\_disk\_size) | n/a | `string` | `"20Gi"` | no |
+| [namespace](#input\_namespace) | Namespace to deploy into; it is usually created while provisioning cloud-storage PVCs, so it is only referenced here, not created | `string` | `"apitable-datacenter"` | no |
+| [rabbitmq\_default\_password](#input\_rabbitmq\_default\_password) | n/a | `string` | `"7r4HVvsrwP4kQjAgj8Jj"` | no |
+| [rabbitmq\_default\_user](#input\_rabbitmq\_default\_user) | n/a | `string` | `"user"` | no |
+| [rabbitmq\_resources](#input\_rabbitmq\_resources) | resource limits | `map` | `{ "limits": {}, "requests": {} }` | no |
+| [redis\_default\_password](#input\_redis\_default\_password) | n/a | `string` | `"UHWCWiuUMVyupqmW4cXV"` | no |
+| [redis\_disk\_size](#input\_redis\_disk\_size) | n/a | `string` | `"20Gi"` | no |
+
+## Outputs
+
+No outputs.
+
\ No newline at end of file
diff --git a/modules/datacenter/config_chart_repositorys.tf b/modules/datacenter/config_chart_repositorys.tf
new file mode 100644
index 0000000..855dc58
--- /dev/null
+++ b/modules/datacenter/config_chart_repositorys.tf
@@ -0,0 +1,37 @@
+
+locals {
+ chart_repositrys = merge({
+ mysql = var.chart_repository
+ elasticsearch = var.chart_repository
+ filebeat = var.chart_repository
+ kafka = var.chart_repository
+ kibana = var.chart_repository
+ minio = var.chart_repository
+ mongodb = var.chart_repository
+ postgresql = var.chart_repository
+ rabbitmq = var.chart_repository
+ redis = var.chart_repository
+ zookeeper = var.chart_repository
+ }, var.chart_repositorys)
+}
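+
+# Example (illustrative): mirror a single chart without touching the rest by
+# passing, e.g.
+#   chart_repositorys = { redis = "https://my-mirror.example.com/bitnami" }
+# merge() lets the per-chart entry override var.chart_repository.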
+
+locals {
+ chart_registrys = merge({
+ mysql = var.chart_registry
+ elasticsearch = var.chart_registry
+ filebeat = var.chart_registry
+ kafka = var.chart_registry
+ kibana = var.chart_registry
+ minio = var.chart_registry
+ mongodb = var.chart_registry
+ postgresql = var.chart_registry
+ rabbitmq = var.chart_registry
+ redis = var.chart_registry
+ zookeeper = var.chart_registry
+ }, var.chart_registrys)
+}
\ No newline at end of file
diff --git a/modules/datacenter/minio.helm.tf b/modules/datacenter/minio.helm.tf
new file mode 100644
index 0000000..26cbede
--- /dev/null
+++ b/modules/datacenter/minio.helm.tf
@@ -0,0 +1,26 @@
+# https://github.com/bitnami/charts/tree/master/bitnami/minio
+
+
+resource "helm_release" "minio" {
+
+ count = var.has_minio ? 1 : 0
+
+ name = "minio"
+ repository = lookup(local.chart_repositrys, "minio")
+ chart = "minio"
+ namespace = var.namespace
+ values = [
+ <