diff --git a/lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml b/lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml
index 9cb0e3929..4d855bdef 100644
--- a/lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml
+++ b/lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml
@@ -21,7 +21,6 @@ controller:
     minReplicas: {{ nginx_hpa_minimum_replicas }}
     maxReplicas: {{ nginx_hpa_maximum_replicas }}
     targetCPUUtilizationPercentage: {{ nginx_hpa_target_cpu_utilization_percentage }}
-    targetMemoryUtilizationPercentage: {{ nginx_hpa_target_memory_utilization_percentage }}
 
   publishService:
     enabled: true
diff --git a/lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml b/lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml
index f8efaf909..71c86c245 100644
--- a/lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml
+++ b/lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml
@@ -21,7 +21,6 @@ controller:
     minReplicas: {{ nginx_hpa_minimum_replicas }}
     maxReplicas: {{ nginx_hpa_maximum_replicas }}
     targetCPUUtilizationPercentage: {{ nginx_hpa_target_cpu_utilization_percentage }}
-    targetMemoryUtilizationPercentage: {{ nginx_hpa_target_memory_utilization_percentage }}
   publishService:
     enabled: true
   service:
diff --git a/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs
index 2caf6ab6a..f318991d1 100644
--- a/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs
+++ b/src/cloud_provider/aws/kubernetes/eks_helm_charts.rs
@@ -422,7 +422,7 @@ pub fn eks_aws_helm_charts(
         request_cpu: KubernetesCpuResourceUnit::MilliCpu(
             chart_config_prerequisites
                 .cluster_advanced_settings
-                .nginx_vcpu_request_in_milli,
+                .nginx_vcpu_request_in_milli_cpu,
         ),
         request_memory: KubernetesMemoryResourceUnit::MebiByte(
             chart_config_prerequisites
@@ -432,7 +432,7 @@
         limit_cpu: KubernetesCpuResourceUnit::MilliCpu(
             chart_config_prerequisites
                 .cluster_advanced_settings
-                .nginx_vcpu_limit_in_milli,
+                .nginx_vcpu_limit_in_milli_cpu,
         ),
         limit_memory: KubernetesMemoryResourceUnit::MebiByte(
             chart_config_prerequisites
diff --git a/src/cloud_provider/aws/kubernetes/mod.rs b/src/cloud_provider/aws/kubernetes/mod.rs
index 2d1908f49..628458607 100644
--- a/src/cloud_provider/aws/kubernetes/mod.rs
+++ b/src/cloud_provider/aws/kubernetes/mod.rs
@@ -635,12 +635,6 @@ fn tera_context(
             .advanced_settings()
             .nginx_hpa_cpu_utilization_percentage_threshold,
     );
-    context.insert(
-        "nginx_hpa_target_memory_utilization_percentage",
-        &kubernetes
-            .advanced_settings()
-            .nginx_hpa_memory_utilization_percentage_threshold,
-    );
 
     // EKS Addons
     if kubernetes.kind() != Kind::Ec2 {
diff --git a/src/cloud_provider/helm.rs b/src/cloud_provider/helm.rs
index 876cfa15d..56cff6551 100644
--- a/src/cloud_provider/helm.rs
+++ b/src/cloud_provider/helm.rs
@@ -564,33 +564,6 @@ pub trait HelmChart: Send {
         };
         Ok(payload)
     }
-
-    fn get_vpa_chart_info(&self, vpa_config: Option) -> ChartInfo {
-        let current_chart = self.get_chart_info();
-        let chart_name = format!("vpa-{}", current_chart.name);
-        ChartInfo {
-            name: chart_name.clone(),
-            path: match vpa_config.clone() {
-                Some(x) => x.helm_path.to_string(),
-                None => ".".to_string(),
-            },
-            action: match vpa_config {
-                Some(_) => Deploy,
-                None => HelmAction::Destroy,
-            },
-            namespace: current_chart.namespace,
-            custom_namespace: current_chart.custom_namespace.clone(),
-            yaml_files_content: match vpa_config {
-                Some(config) => vec![ChartValuesGenerated::new(
-                    chart_name,
-                    ChartInfo::generate_vpa_helm_config(config.vpa),
-                )],
-                None => vec![],
-            },
-            timeout_in_seconds: 15,
-            ..Default::default()
-        }
-    }
 }
 
 impl Clone for Box<dyn HelmChart> {
@@ -765,6 +738,44 @@ impl CommonChart {
             vertical_pod_autoscaler,
         }
     }
+
+    fn get_vpa_chart_info(&self, vpa_config: Option) -> ChartInfo {
+        let current_chart = self.get_chart_info();
+        let chart_name = format!("vpa-{}", current_chart.name);
+        ChartInfo {
+            name: chart_name.clone(),
+            path: match vpa_config.clone() {
+                Some(x) => x.helm_path.to_string(),
+                None => ".".to_string(),
+            },
+            action: match vpa_config {
+                Some(_) => Deploy,
+                None => HelmAction::Destroy,
+            },
+            namespace: current_chart.namespace,
+            custom_namespace: current_chart.custom_namespace.clone(),
+            yaml_files_content: match vpa_config {
+                Some(config) => vec![ChartValuesGenerated::new(
+                    chart_name,
+                    ChartInfo::generate_vpa_helm_config(config.vpa),
+                )],
+                None => vec![],
+            },
+            timeout_in_seconds: 15,
+            ..Default::default()
+        }
+    }
+}
+
+#[derive(Default, Clone)]
+pub struct ServiceChart {
+    pub chart_info: ChartInfo,
+}
+
+impl ServiceChart {
+    pub fn new(chart_info: ChartInfo) -> Self {
+        ServiceChart { chart_info }
+    }
 }
 
 /// using ChartPayload to pass random kind of data between each deployment steps against a chart deployment
@@ -834,6 +845,57 @@ impl HelmChart for CommonChart {
     }
 }
 
+impl HelmChart for ServiceChart {
+    fn clone_dyn(&self) -> Box<dyn HelmChart> {
+        Box::new(self.clone())
+    }
+
+    fn check_prerequisites(&self) -> Result<Option<ChartPayload>, HelmChartError> {
+        Ok(None)
+    }
+
+    fn get_chart_info(&self) -> &ChartInfo {
+        &self.chart_info
+    }
+
+    fn exec(
+        &self,
+        kubernetes_config: &Path,
+        envs: &[(&str, &str)],
+        payload: Option<ChartPayload>,
+        cmd_killer: &CommandKiller,
+    ) -> Result<Option<ChartPayload>, HelmChartError> {
+        let chart_info = self.get_chart_info();
+        let helm = Helm::new(kubernetes_config, envs)?;
+
+        match chart_info.action {
+            Deploy => {
+                let _ = helm.upgrade_diff(chart_info, &[]);
+                match helm.upgrade(chart_info, &[], cmd_killer) {
+                    Ok(_) => {}
+                    Err(e) => {
+                        return Err(HelmChartError::HelmError(e));
+                    }
+                };
+            }
+            HelmAction::Destroy => {
+                helm.uninstall(chart_info, &[], &CommandKiller::never(), &mut |_| {}, &mut |_| {})?;
+            }
+            HelmAction::Skip => {}
+        }
+        Ok(payload)
+    }
+
+    fn on_deploy_failure(
+        &self,
+        _kubernetes_config: &Path,
+        _envs: &[(&str, &str)],
+        payload: Option<ChartPayload>,
+    ) -> Result<Option<ChartPayload>, CommandError> {
+        Ok(payload)
+    }
+}
+
 pub fn get_latest_successful_deployment(helm_history_list: &[HelmHistoryRow]) -> Result<HelmHistoryRow, CommandError> {
     let mut helm_history_reversed = helm_history_list.to_owned();
     helm_history_reversed.reverse();
diff --git a/src/cloud_provider/io.rs b/src/cloud_provider/io.rs
index 1c8a2f8bd..5465d9b4b 100644
--- a/src/cloud_provider/io.rs
+++ b/src/cloud_provider/io.rs
@@ -94,18 +94,16 @@ pub struct ClusterAdvancedSettings {
     pub database_mongodb_allowed_cidrs: Vec<String>,
     #[serde(alias = "registry.mirroring_mode", default = "default_registry_mirroring_mode")]
     pub registry_mirroring_mode: RegistryMirroringMode,
-    #[serde(alias = "nginx.vcpu.request_in_milli")]
-    pub nginx_vcpu_request_in_milli: u32,
-    #[serde(alias = "nginx.vcpu.limit_in_milli")]
-    pub nginx_vcpu_limit_in_milli: u32,
+    #[serde(alias = "nginx.vcpu.request_in_milli_cpu")]
+    pub nginx_vcpu_request_in_milli_cpu: u32,
+    #[serde(alias = "nginx.vcpu.limit_in_milli_cpu")]
+    pub nginx_vcpu_limit_in_milli_cpu: u32,
     #[serde(alias = "nginx.memory.request_in_mib")]
     pub nginx_memory_request_in_mib: u32,
     #[serde(alias = "nginx.memory.limit_in_mib")]
     pub nginx_memory_limit_in_mib: u32,
     #[serde(alias = "nginx.hpa.cpu_utilization_percentage_threshold")]
     pub nginx_hpa_cpu_utilization_percentage_threshold: u32,
-    #[serde(alias = "nginx.hpa.memory_utilization_percentage_threshold")]
-    pub nginx_hpa_memory_utilization_percentage_threshold: u32,
     #[serde(alias = "nginx.hpa.min_number_instances")]
     pub nginx_hpa_min_number_instances: i32,
     #[serde(alias = "nginx.hpa.max_number_instances")]
@@ -138,12 +136,11 @@ impl Default for ClusterAdvancedSettings {
             database_mongodb_deny_public_access: false,
             database_mongodb_allowed_cidrs: default_database_cirds,
             registry_mirroring_mode: RegistryMirroringMode::Service,
-            nginx_vcpu_request_in_milli: 100,
-            nginx_vcpu_limit_in_milli: 500,
+            nginx_vcpu_request_in_milli_cpu: 100,
+            nginx_vcpu_limit_in_milli_cpu: 500,
             nginx_memory_request_in_mib: 768,
             nginx_memory_limit_in_mib: 768,
             nginx_hpa_cpu_utilization_percentage_threshold: 50,
-            nginx_hpa_memory_utilization_percentage_threshold: 50,
             nginx_hpa_min_number_instances: 2,
             nginx_hpa_max_number_instances: 25,
         }
@@ -285,41 +282,39 @@ mod tests {
     fn test_default_values_for_nginx() {
         let data = r#"
        {}"#;
         let cluster_advanced_settings: ClusterAdvancedSettings = serde_json::from_str(data).unwrap();
-        assert_eq!(cluster_advanced_settings.nginx_vcpu_request_in_milli, 100);
-        assert_eq!(cluster_advanced_settings.nginx_vcpu_limit_in_milli, 500);
+        assert_eq!(cluster_advanced_settings.nginx_vcpu_request_in_milli_cpu, 100);
+        assert_eq!(cluster_advanced_settings.nginx_vcpu_limit_in_milli_cpu, 500);
         assert_eq!(cluster_advanced_settings.nginx_memory_request_in_mib, 768);
         assert_eq!(cluster_advanced_settings.nginx_memory_limit_in_mib, 768);
         assert_eq!(cluster_advanced_settings.nginx_hpa_cpu_utilization_percentage_threshold, 50);
-        assert_eq!(cluster_advanced_settings.nginx_hpa_memory_utilization_percentage_threshold, 50);
         assert_eq!(cluster_advanced_settings.nginx_hpa_min_number_instances, 2);
         assert_eq!(cluster_advanced_settings.nginx_hpa_max_number_instances, 25);
     }
 
     #[test]
     fn test_nginx_deserialization() {
-        let nginx_vcpu_request_in_milli = 155;
+        let nginx_vcpu_request_in_milli_cpu = 155;
         let nginx_hpa_cpu_utilization_percentage_threshold = 75;
         let data = format!(
             r#"
        {{
-            "nginx.vcpu.request_in_milli": {},
+            "nginx.vcpu.request_in_milli_cpu": {},
             "nginx.hpa.cpu_utilization_percentage_threshold": {}
        }}"#,
-            nginx_vcpu_request_in_milli, nginx_hpa_cpu_utilization_percentage_threshold
+            nginx_vcpu_request_in_milli_cpu, nginx_hpa_cpu_utilization_percentage_threshold
         );
         let cluster_advanced_settings: ClusterAdvancedSettings = serde_json::from_str(data.as_str()).unwrap();
         assert_eq!(
-            cluster_advanced_settings.nginx_vcpu_request_in_milli,
-            nginx_vcpu_request_in_milli
+            cluster_advanced_settings.nginx_vcpu_request_in_milli_cpu,
+            nginx_vcpu_request_in_milli_cpu
         );
-        assert_eq!(cluster_advanced_settings.nginx_vcpu_limit_in_milli, 500);
+        assert_eq!(cluster_advanced_settings.nginx_vcpu_limit_in_milli_cpu, 500);
         assert_eq!(cluster_advanced_settings.nginx_memory_request_in_mib, 768);
         assert_eq!(cluster_advanced_settings.nginx_memory_limit_in_mib, 768);
         assert_eq!(
             cluster_advanced_settings.nginx_hpa_cpu_utilization_percentage_threshold,
             nginx_hpa_cpu_utilization_percentage_threshold
         );
-        assert_eq!(cluster_advanced_settings.nginx_hpa_memory_utilization_percentage_threshold, 50);
         assert_eq!(cluster_advanced_settings.nginx_hpa_min_number_instances, 2);
         assert_eq!(cluster_advanced_settings.nginx_hpa_max_number_instances, 25);
     }
diff --git a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs
index 0cda7315c..3efff1711 100644
--- a/src/cloud_provider/scaleway/kubernetes/helm_charts.rs
+++ b/src/cloud_provider/scaleway/kubernetes/helm_charts.rs
@@ -378,7 +378,7 @@ pub fn scw_helm_charts(
         request_cpu: KubernetesCpuResourceUnit::MilliCpu(
             chart_config_prerequisites
                 .cluster_advanced_settings
-                .nginx_vcpu_request_in_milli,
+                .nginx_vcpu_request_in_milli_cpu,
         ),
         request_memory: KubernetesMemoryResourceUnit::MebiByte(
             chart_config_prerequisites
@@ -388,7 +388,7 @@
         limit_cpu: KubernetesCpuResourceUnit::MilliCpu(
             chart_config_prerequisites
                 .cluster_advanced_settings
-                .nginx_vcpu_limit_in_milli,
+                .nginx_vcpu_limit_in_milli_cpu,
         ),
         limit_memory: KubernetesMemoryResourceUnit::MebiByte(
             chart_config_prerequisites
diff --git a/src/cloud_provider/scaleway/kubernetes/mod.rs b/src/cloud_provider/scaleway/kubernetes/mod.rs
index b903b3c7c..d28f7eb3d 100644
--- a/src/cloud_provider/scaleway/kubernetes/mod.rs
+++ b/src/cloud_provider/scaleway/kubernetes/mod.rs
@@ -589,12 +589,6 @@ impl Kapsule {
             "nginx_hpa_target_cpu_utilization_percentage",
             &self.advanced_settings().nginx_hpa_cpu_utilization_percentage_threshold,
         );
-        context.insert(
-            "nginx_hpa_target_memory_utilization_percentage",
-            &self
-                .advanced_settings()
-                .nginx_hpa_memory_utilization_percentage_threshold,
-        );
 
         Ok(context)
     }
diff --git a/src/cloud_provider/service.rs b/src/cloud_provider/service.rs
index 098a643bf..24c2b6a81 100644
--- a/src/cloud_provider/service.rs
+++ b/src/cloud_provider/service.rs
@@ -4,7 +4,6 @@ use serde::{Deserialize, Serialize};
 use std::fmt::{Display, Formatter};
 use std::fs::File;
 use std::io::BufReader;
-use std::path::Path;
 use std::str::FromStr;
 use std::time::Duration;
 
@@ -17,8 +16,6 @@ use uuid::Uuid;
 use crate::cloud_provider::environment::Environment;
 use crate::cloud_provider::kubernetes::Kubernetes;
 use crate::cloud_provider::models::{EnvironmentVariable, InvalidStatefulsetStorage};
-use crate::cmd::kubectl::{kubectl_exec_delete_pod, kubectl_exec_get_pods};
-use crate::cmd::structs::KubernetesPodStatusPhase;
 use crate::cmd::terraform::TerraformError;
 use crate::errors::{CommandError, EngineError};
 use crate::events::{EnvironmentStep, EventDetails, Stage};
@@ -279,37 +276,6 @@ pub fn get_tfstate_name(service: &dyn Service) -> String {
     format!("tfstate-default-{}", service.id())
 }
 
-pub fn delete_pending_service<P>(
-    kubernetes_config: P,
-    namespace: &str,
-    selector: &str,
-    envs: Vec<(&str, &str)>,
-    event_details: EventDetails,
-) -> Result<(), Box<EngineError>>
-where
-    P: AsRef<Path>,
-{
-    match kubectl_exec_get_pods(&kubernetes_config, Some(namespace), Some(selector), envs.clone()) {
-        Ok(pods) => {
-            for pod in pods.items {
-                if pod.status.phase == KubernetesPodStatusPhase::Pending {
-                    if let Err(e) = kubectl_exec_delete_pod(
-                        &kubernetes_config,
-                        pod.metadata.namespace.as_str(),
-                        pod.metadata.name.as_str(),
-                        envs.clone(),
-                    ) {
-                        return Err(Box::new(EngineError::new_k8s_service_issue(event_details, e)));
-                    }
-                }
-            }
-
-            Ok(())
-        }
-        Err(e) => Err(Box::new(EngineError::new_k8s_service_issue(event_details, e))),
-    }
-}
-
 pub async fn increase_storage_size(
     namespace: &str,
     invalid_statefulset: &InvalidStatefulsetStorage,
diff --git a/src/deployment_action/deploy_application.rs b/src/deployment_action/deploy_application.rs
index 455768e2d..348ab234c 100644
--- a/src/deployment_action/deploy_application.rs
+++ b/src/deployment_action/deploy_application.rs
@@ -1,5 +1,5 @@
 use crate::cloud_provider::helm::{ChartInfo, HelmAction, HelmChartNamespaces};
-use crate::cloud_provider::service::{delete_pending_service, Action, Service};
+use crate::cloud_provider::service::{Action, Service};
 use crate::cloud_provider::DeploymentTarget;
 use crate::deployment_action::deploy_helm::HelmDeployment;
 use crate::deployment_action::pause_service::PauseServiceAction;
@@ -80,14 +80,6 @@ where
 
         helm.on_create(target)?;
 
-        delete_pending_service(
-            target.kubernetes.get_kubeconfig_file_path()?.as_str(),
-            target.environment.namespace(),
-            self.kube_label_selector().as_str(),
-            target.kubernetes.cloud_provider().credentials_environment_variables(),
-            event_details,
-        )?;
-
         Ok(())
     };
diff --git a/src/deployment_action/deploy_container.rs b/src/deployment_action/deploy_container.rs
index bdf81a19f..09ddfeff1 100644
--- a/src/deployment_action/deploy_container.rs
+++ b/src/deployment_action/deploy_container.rs
@@ -1,5 +1,5 @@
 use crate::cloud_provider::helm::{ChartInfo, HelmAction, HelmChartNamespaces};
-use crate::cloud_provider::service::{delete_pending_service, Action, Service};
+use crate::cloud_provider::service::{Action, Service};
 use crate::cloud_provider::DeploymentTarget;
 use crate::deployment_action::deploy_helm::HelmDeployment;
 use crate::deployment_action::pause_service::PauseServiceAction;
@@ -115,14 +115,6 @@ where
 
         helm.on_create(target)?;
 
-        delete_pending_service(
-            target.kubernetes.get_kubeconfig_file_path()?.as_str(),
-            target.environment.namespace(),
-            self.kube_label_selector().as_str(),
-            target.kubernetes.cloud_provider().credentials_environment_variables(),
-            event_details.clone(),
-        )?;
-
         Ok(state)
     };
diff --git a/src/deployment_action/deploy_database.rs b/src/deployment_action/deploy_database.rs
index f3ee8b96d..31a764b21 100644
--- a/src/deployment_action/deploy_database.rs
+++ b/src/deployment_action/deploy_database.rs
@@ -1,5 +1,5 @@
 use crate::cloud_provider::helm::{ChartInfo, ChartSetValue, HelmAction, HelmChartNamespaces};
-use crate::cloud_provider::service::{delete_pending_service, get_database_terraform_config, Action, Service};
+use crate::cloud_provider::service::{get_database_terraform_config, Action, Service};
 use crate::cloud_provider::Kind::Aws;
 use crate::cloud_provider::{service, DeploymentTarget};
 use crate::cmd;
@@ -779,14 +779,6 @@ where
             };
         };
 
-        delete_pending_service(
-            target.kubernetes.get_kubeconfig_file_path()?.as_str(),
-            target.environment.namespace(),
-            self.kube_label_selector().as_str(),
-            target.kubernetes.cloud_provider().credentials_environment_variables(),
-            event_details.clone(),
-        )?;
-
         Ok(())
     };
diff --git a/src/deployment_action/deploy_helm.rs b/src/deployment_action/deploy_helm.rs
index e1d9b3856..eb9e17076 100644
--- a/src/deployment_action/deploy_helm.rs
+++ b/src/deployment_action/deploy_helm.rs
@@ -1,4 +1,4 @@
-use crate::cloud_provider::helm::{ChartInfo, CommonChart, HelmChart};
+use crate::cloud_provider::helm::{ChartInfo, HelmChart, ServiceChart};
 use crate::cloud_provider::DeploymentTarget;
 use crate::cmd::command::CommandKiller;
 use crate::deployment_action::DeploymentAction;
@@ -92,8 +92,8 @@ impl DeploymentAction for HelmDeployment {
     fn on_create(&self, target: &DeploymentTarget) -> Result<(), Box<EngineError>> {
         self.prepare_helm_chart()?;
 
-        let common_chart = CommonChart::new(self.helm_chart.clone(), None, None);
-        let chart: Box<dyn HelmChart> = Box::new(common_chart);
+        let service_chart = ServiceChart::new(self.helm_chart.clone());
+        let chart: Box<dyn HelmChart> = Box::new(service_chart);
 
         let kubeconfig_string = target.kubernetes.get_kubeconfig_file_path()?;
         let kubeconfig = Path::new(kubeconfig_string.as_str());
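
For reference, a minimal, self-contained sketch of how the renamed advanced-settings keys behave after this change. This is not the engine's real `ClusterAdvancedSettings`: the field list is trimmed to the nginx settings touched by this diff, and a container-level `#[serde(default)]` plus a hand-written `impl Default` stand in for however the real struct wires up its defaults (the `{}` test in the diff implies some such mechanism). The aliases and the default values (100m request, 500m limit, 50% CPU threshold) come straight from the diff and its tests; it assumes `serde` with the `derive` feature and `serde_json` as dependencies.

```rust
use serde::Deserialize;

// Trimmed-down stand-in for the advanced settings, covering only the nginx
// fields renamed or kept by this diff.
#[derive(Deserialize)]
#[serde(default)]
struct NginxAdvancedSettingsSketch {
    #[serde(alias = "nginx.vcpu.request_in_milli_cpu")]
    nginx_vcpu_request_in_milli_cpu: u32,
    #[serde(alias = "nginx.vcpu.limit_in_milli_cpu")]
    nginx_vcpu_limit_in_milli_cpu: u32,
    #[serde(alias = "nginx.hpa.cpu_utilization_percentage_threshold")]
    nginx_hpa_cpu_utilization_percentage_threshold: u32,
}

impl Default for NginxAdvancedSettingsSketch {
    fn default() -> Self {
        // Defaults mirror `impl Default for ClusterAdvancedSettings` in the diff.
        NginxAdvancedSettingsSketch {
            nginx_vcpu_request_in_milli_cpu: 100,
            nginx_vcpu_limit_in_milli_cpu: 500,
            nginx_hpa_cpu_utilization_percentage_threshold: 50,
        }
    }
}

fn main() {
    // Only the vCPU request is overridden; the other fields fall back to defaults,
    // which is exactly what `test_nginx_deserialization` asserts.
    let payload = r#"{ "nginx.vcpu.request_in_milli_cpu": 155 }"#;
    let settings: NginxAdvancedSettingsSketch = serde_json::from_str(payload).unwrap();
    assert_eq!(settings.nginx_vcpu_request_in_milli_cpu, 155);
    assert_eq!(settings.nginx_vcpu_limit_in_milli_cpu, 500);
    assert_eq!(settings.nginx_hpa_cpu_utilization_percentage_threshold, 50);
}
```

Note that the old `nginx.vcpu.*_in_milli` spellings are renamed rather than kept side by side, so callers must send the new `*_in_milli_cpu` keys.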
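The new `ServiceChart` is passed around as a `Box<dyn HelmChart>`, which relies on the `clone_dyn` idiom visible in the diff (`clone_dyn` on the trait, plus the `impl Clone for Box<dyn HelmChart>` context line, which presumably delegates to it). Below is a stand-alone sketch of that idiom with simplified stand-in names (`Chart`, `ServiceChartSketch`); it illustrates the pattern only and is not the engine's actual trait.

```rust
// `Clone` is not object-safe, so the trait exposes `clone_dyn` instead and a
// blanket `Clone` impl on the boxed trait object forwards to it.
trait Chart: Send {
    fn name(&self) -> &str;
    fn clone_dyn(&self) -> Box<dyn Chart>;
}

impl Clone for Box<dyn Chart> {
    fn clone(&self) -> Self {
        self.clone_dyn()
    }
}

#[derive(Clone)]
struct ServiceChartSketch {
    name: String,
}

impl Chart for ServiceChartSketch {
    fn name(&self) -> &str {
        &self.name
    }
    fn clone_dyn(&self) -> Box<dyn Chart> {
        Box::new(self.clone())
    }
}

fn main() {
    // Boxing here mirrors `let chart: Box<dyn HelmChart> = Box::new(service_chart);`
    // in `HelmDeployment::on_create`.
    let chart: Box<dyn Chart> = Box::new(ServiceChartSketch { name: "my-app".to_string() });
    let copy = chart.clone(); // goes through clone_dyn()
    assert_eq!(chart.name(), copy.name());
}
```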