Skip to content

Commit

Permalink
bump 10/27/23 09:01:48
Browse files Browse the repository at this point in the history
  • Loading branch information
Qovery committed Oct 27, 2023
1 parent 93829b3 commit c7906f5
Show file tree
Hide file tree
Showing 13 changed files with 113 additions and 128 deletions.
1 change: 0 additions & 1 deletion lib/aws/bootstrap/chart_values/nginx-ingress.j2.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ controller:
minReplicas: {{ nginx_hpa_minimum_replicas }}
maxReplicas: {{ nginx_hpa_maximum_replicas }}
targetCPUUtilizationPercentage: {{ nginx_hpa_target_cpu_utilization_percentage }}
targetMemoryUtilizationPercentage: {{ nginx_hpa_target_memory_utilization_percentage }}

publishService:
enabled: true
Expand Down
1 change: 0 additions & 1 deletion lib/scaleway/bootstrap/chart_values/nginx-ingress.j2.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ controller:
minReplicas: {{ nginx_hpa_minimum_replicas }}
maxReplicas: {{ nginx_hpa_maximum_replicas }}
targetCPUUtilizationPercentage: {{ nginx_hpa_target_cpu_utilization_percentage }}
targetMemoryUtilizationPercentage: {{ nginx_hpa_target_memory_utilization_percentage }}
publishService:
enabled: true
service:
Expand Down
4 changes: 2 additions & 2 deletions src/cloud_provider/aws/kubernetes/eks_helm_charts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ pub fn eks_aws_helm_charts(
request_cpu: KubernetesCpuResourceUnit::MilliCpu(
chart_config_prerequisites
.cluster_advanced_settings
.nginx_vcpu_request_in_milli,
.nginx_vcpu_request_in_milli_cpu,
),
request_memory: KubernetesMemoryResourceUnit::MebiByte(
chart_config_prerequisites
Expand All @@ -432,7 +432,7 @@ pub fn eks_aws_helm_charts(
limit_cpu: KubernetesCpuResourceUnit::MilliCpu(
chart_config_prerequisites
.cluster_advanced_settings
.nginx_vcpu_limit_in_milli,
.nginx_vcpu_limit_in_milli_cpu,
),
limit_memory: KubernetesMemoryResourceUnit::MebiByte(
chart_config_prerequisites
Expand Down
6 changes: 0 additions & 6 deletions src/cloud_provider/aws/kubernetes/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -635,12 +635,6 @@ fn tera_context(
.advanced_settings()
.nginx_hpa_cpu_utilization_percentage_threshold,
);
context.insert(
"nginx_hpa_target_memory_utilization_percentage",
&kubernetes
.advanced_settings()
.nginx_hpa_memory_utilization_percentage_threshold,
);

// EKS Addons
if kubernetes.kind() != Kind::Ec2 {
Expand Down
116 changes: 89 additions & 27 deletions src/cloud_provider/helm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -564,33 +564,6 @@ pub trait HelmChart: Send {
};
Ok(payload)
}

fn get_vpa_chart_info(&self, vpa_config: Option<CommonChartVpa>) -> ChartInfo {
    // Build a companion "vpa-<chart>" chart derived from this chart's settings.
    // With a config present we deploy the VPA objects; without one we destroy
    // any previously-installed VPA release for this chart.
    let base = self.get_chart_info();
    let vpa_chart_name = format!("vpa-{}", base.name);

    let (path, action, yaml_files_content) = match vpa_config {
        Some(config) => (
            config.helm_path.to_string(),
            Deploy,
            vec![ChartValuesGenerated::new(
                vpa_chart_name.clone(),
                ChartInfo::generate_vpa_helm_config(config.vpa),
            )],
        ),
        None => (".".to_string(), HelmAction::Destroy, vec![]),
    };

    ChartInfo {
        name: vpa_chart_name,
        path,
        action,
        // The VPA release lives alongside the chart it targets.
        namespace: base.namespace,
        custom_namespace: base.custom_namespace.clone(),
        yaml_files_content,
        timeout_in_seconds: 15,
        ..Default::default()
    }
}
}

impl Clone for Box<dyn HelmChart> {
Expand Down Expand Up @@ -765,6 +738,44 @@ impl CommonChart {
vertical_pod_autoscaler,
}
}

fn get_vpa_chart_info(&self, vpa_config: Option<CommonChartVpa>) -> ChartInfo {
    // Derive the "vpa-<chart>" companion chart for this chart: deploy it when a
    // VPA config is supplied, otherwise destroy any existing VPA release.
    let base = self.get_chart_info();
    let vpa_chart_name = format!("vpa-{}", base.name);

    // Resolve everything that depends on the presence of a config in one place.
    let (path, action, yaml_files_content) = match vpa_config {
        Some(config) => (
            config.helm_path.to_string(),
            Deploy,
            vec![ChartValuesGenerated::new(
                vpa_chart_name.clone(),
                ChartInfo::generate_vpa_helm_config(config.vpa),
            )],
        ),
        None => (".".to_string(), HelmAction::Destroy, vec![]),
    };

    ChartInfo {
        name: vpa_chart_name,
        path,
        action,
        namespace: base.namespace,
        custom_namespace: base.custom_namespace.clone(),
        yaml_files_content,
        timeout_in_seconds: 15,
        ..Default::default()
    }
}
}

/// Minimal [`HelmChart`] implementation driven entirely by a [`ChartInfo`].
///
/// Unlike richer chart types, it carries no extra behavior: prerequisites are
/// a no-op and `exec` simply applies the action recorded in `chart_info`.
#[derive(Default, Clone)]
pub struct ServiceChart {
    // Full description of the release (name, path, action, namespace, values…).
    pub chart_info: ChartInfo,
}

impl ServiceChart {
pub fn new(chart_info: ChartInfo) -> Self {
ServiceChart { chart_info }
}
}

/// using ChartPayload to pass random kind of data between each deployment steps against a chart deployment
Expand Down Expand Up @@ -834,6 +845,57 @@ impl HelmChart for CommonChart {
}
}

impl HelmChart for ServiceChart {
    fn clone_dyn(&self) -> Box<dyn HelmChart> {
        Box::new(self.clone())
    }

    /// A plain service chart has no prerequisites to validate.
    fn check_prerequisites(&self) -> Result<Option<ChartPayload>, HelmChartError> {
        Ok(None)
    }

    fn get_chart_info(&self) -> &ChartInfo {
        &self.chart_info
    }

    /// Applies the action recorded in `chart_info` (deploy, destroy or skip)
    /// and forwards the incoming payload untouched.
    fn exec(
        &self,
        kubernetes_config: &Path,
        envs: &[(&str, &str)],
        payload: Option<ChartPayload>,
        cmd_killer: &CommandKiller,
    ) -> Result<Option<ChartPayload>, HelmChartError> {
        let chart = self.get_chart_info();
        let helm = Helm::new(kubernetes_config, envs)?;

        match chart.action {
            Deploy => {
                // Best-effort diff for visibility; its failure must not abort the upgrade.
                let _ = helm.upgrade_diff(chart, &[]);
                helm.upgrade(chart, &[], cmd_killer).map_err(HelmChartError::HelmError)?;
            }
            HelmAction::Destroy => {
                // Destroy never gets cancelled and ignores stdout/stderr streams.
                helm.uninstall(chart, &[], &CommandKiller::never(), &mut |_| {}, &mut |_| {})?;
            }
            HelmAction::Skip => {}
        }

        Ok(payload)
    }

    /// No recovery behavior: pass the payload through on failure.
    fn on_deploy_failure(
        &self,
        _kubernetes_config: &Path,
        _envs: &[(&str, &str)],
        payload: Option<ChartPayload>,
    ) -> Result<Option<ChartPayload>, CommandError> {
        Ok(payload)
    }
}

pub fn get_latest_successful_deployment(helm_history_list: &[HelmHistoryRow]) -> Result<HelmHistoryRow, CommandError> {
let mut helm_history_reversed = helm_history_list.to_owned();
helm_history_reversed.reverse();
Expand Down
33 changes: 14 additions & 19 deletions src/cloud_provider/io.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,18 +94,16 @@ pub struct ClusterAdvancedSettings {
pub database_mongodb_allowed_cidrs: Vec<String>,
#[serde(alias = "registry.mirroring_mode", default = "default_registry_mirroring_mode")]
pub registry_mirroring_mode: RegistryMirroringMode,
#[serde(alias = "nginx.vcpu.request_in_milli")]
pub nginx_vcpu_request_in_milli: u32,
#[serde(alias = "nginx.vcpu.limit_in_milli")]
pub nginx_vcpu_limit_in_milli: u32,
#[serde(alias = "nginx.vcpu.request_in_milli_cpu")]
pub nginx_vcpu_request_in_milli_cpu: u32,
#[serde(alias = "nginx.vcpu.limit_in_milli_cpu")]
pub nginx_vcpu_limit_in_milli_cpu: u32,
#[serde(alias = "nginx.memory.request_in_mib")]
pub nginx_memory_request_in_mib: u32,
#[serde(alias = "nginx.memory.limit_in_mib")]
pub nginx_memory_limit_in_mib: u32,
#[serde(alias = "nginx.hpa.cpu_utilization_percentage_threshold")]
pub nginx_hpa_cpu_utilization_percentage_threshold: u32,
#[serde(alias = "nginx.hpa.memory_utilization_percentage_threshold")]
pub nginx_hpa_memory_utilization_percentage_threshold: u32,
#[serde(alias = "nginx.hpa.min_number_instances")]
pub nginx_hpa_min_number_instances: i32,
#[serde(alias = "nginx.hpa.max_number_instances")]
Expand Down Expand Up @@ -138,12 +136,11 @@ impl Default for ClusterAdvancedSettings {
database_mongodb_deny_public_access: false,
database_mongodb_allowed_cidrs: default_database_cirds,
registry_mirroring_mode: RegistryMirroringMode::Service,
nginx_vcpu_request_in_milli: 100,
nginx_vcpu_limit_in_milli: 500,
nginx_vcpu_request_in_milli_cpu: 100,
nginx_vcpu_limit_in_milli_cpu: 500,
nginx_memory_request_in_mib: 768,
nginx_memory_limit_in_mib: 768,
nginx_hpa_cpu_utilization_percentage_threshold: 50,
nginx_hpa_memory_utilization_percentage_threshold: 50,
nginx_hpa_min_number_instances: 2,
nginx_hpa_max_number_instances: 25,
}
Expand Down Expand Up @@ -285,41 +282,39 @@ mod tests {
fn test_default_values_for_nginx() {
    // Deserializing an empty JSON object must yield the nginx defaults
    // declared in `ClusterAdvancedSettings::default()`.
    let empty_json = r#" {}"#;
    let settings: ClusterAdvancedSettings = serde_json::from_str(empty_json).unwrap();

    assert_eq!(settings.nginx_vcpu_request_in_milli_cpu, 100);
    assert_eq!(settings.nginx_vcpu_limit_in_milli_cpu, 500);
    assert_eq!(settings.nginx_memory_request_in_mib, 768);
    assert_eq!(settings.nginx_memory_limit_in_mib, 768);
    assert_eq!(settings.nginx_hpa_cpu_utilization_percentage_threshold, 50);
    assert_eq!(settings.nginx_hpa_min_number_instances, 2);
    assert_eq!(settings.nginx_hpa_max_number_instances, 25);
}

#[test]
fn test_nginx_deserialization() {
    // Explicitly-provided nginx settings must override the defaults,
    // while every unspecified field keeps its default value.
    let expected_request_milli_cpu = 155;
    let expected_cpu_threshold = 75;
    let data = format!(
        r#"
        {{
            "nginx.vcpu.request_in_milli_cpu": {},
            "nginx.hpa.cpu_utilization_percentage_threshold": {}
        }}"#,
        expected_request_milli_cpu, expected_cpu_threshold
    );
    let settings: ClusterAdvancedSettings = serde_json::from_str(data.as_str()).unwrap();

    // Overridden fields.
    assert_eq!(settings.nginx_vcpu_request_in_milli_cpu, expected_request_milli_cpu);
    assert_eq!(
        settings.nginx_hpa_cpu_utilization_percentage_threshold,
        expected_cpu_threshold
    );

    // Fields absent from the payload fall back to their defaults.
    assert_eq!(settings.nginx_vcpu_limit_in_milli_cpu, 500);
    assert_eq!(settings.nginx_memory_request_in_mib, 768);
    assert_eq!(settings.nginx_memory_limit_in_mib, 768);
    assert_eq!(settings.nginx_hpa_min_number_instances, 2);
    assert_eq!(settings.nginx_hpa_max_number_instances, 25);
}
Expand Down
4 changes: 2 additions & 2 deletions src/cloud_provider/scaleway/kubernetes/helm_charts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -378,7 +378,7 @@ pub fn scw_helm_charts(
request_cpu: KubernetesCpuResourceUnit::MilliCpu(
chart_config_prerequisites
.cluster_advanced_settings
.nginx_vcpu_request_in_milli,
.nginx_vcpu_request_in_milli_cpu,
),
request_memory: KubernetesMemoryResourceUnit::MebiByte(
chart_config_prerequisites
Expand All @@ -388,7 +388,7 @@ pub fn scw_helm_charts(
limit_cpu: KubernetesCpuResourceUnit::MilliCpu(
chart_config_prerequisites
.cluster_advanced_settings
.nginx_vcpu_limit_in_milli,
.nginx_vcpu_limit_in_milli_cpu,
),
limit_memory: KubernetesMemoryResourceUnit::MebiByte(
chart_config_prerequisites
Expand Down
6 changes: 0 additions & 6 deletions src/cloud_provider/scaleway/kubernetes/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -589,12 +589,6 @@ impl Kapsule {
"nginx_hpa_target_cpu_utilization_percentage",
&self.advanced_settings().nginx_hpa_cpu_utilization_percentage_threshold,
);
context.insert(
"nginx_hpa_target_memory_utilization_percentage",
&self
.advanced_settings()
.nginx_hpa_memory_utilization_percentage_threshold,
);

Ok(context)
}
Expand Down
34 changes: 0 additions & 34 deletions src/cloud_provider/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@ use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;

Expand All @@ -17,8 +16,6 @@ use uuid::Uuid;
use crate::cloud_provider::environment::Environment;
use crate::cloud_provider::kubernetes::Kubernetes;
use crate::cloud_provider::models::{EnvironmentVariable, InvalidStatefulsetStorage};
use crate::cmd::kubectl::{kubectl_exec_delete_pod, kubectl_exec_get_pods};
use crate::cmd::structs::KubernetesPodStatusPhase;
use crate::cmd::terraform::TerraformError;
use crate::errors::{CommandError, EngineError};
use crate::events::{EnvironmentStep, EventDetails, Stage};
Expand Down Expand Up @@ -279,37 +276,6 @@ pub fn get_tfstate_name(service: &dyn Service) -> String {
format!("tfstate-default-{}", service.id())
}

/// Deletes every pod matching `selector` in `namespace` that is stuck in the
/// `Pending` phase, so a fresh deployment can reschedule them.
///
/// Returns a `k8s_service_issue` engine error if listing pods or deleting any
/// pending pod fails; succeeds (no-op) when nothing is pending.
pub fn delete_pending_service<P>(
    kubernetes_config: P,
    namespace: &str,
    selector: &str,
    envs: Vec<(&str, &str)>,
    event_details: EventDetails,
) -> Result<(), Box<EngineError>>
where
    P: AsRef<Path>,
{
    let pods = match kubectl_exec_get_pods(&kubernetes_config, Some(namespace), Some(selector), envs.clone()) {
        Ok(pods) => pods,
        Err(e) => return Err(Box::new(EngineError::new_k8s_service_issue(event_details, e))),
    };

    for pod in pods.items {
        // Only pods stuck in Pending are targeted; running/terminating pods are left alone.
        if pod.status.phase != KubernetesPodStatusPhase::Pending {
            continue;
        }

        if let Err(e) = kubectl_exec_delete_pod(
            &kubernetes_config,
            pod.metadata.namespace.as_str(),
            pod.metadata.name.as_str(),
            envs.clone(),
        ) {
            return Err(Box::new(EngineError::new_k8s_service_issue(event_details, e)));
        }
    }

    Ok(())
}

pub async fn increase_storage_size(
namespace: &str,
invalid_statefulset: &InvalidStatefulsetStorage,
Expand Down
10 changes: 1 addition & 9 deletions src/deployment_action/deploy_application.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use crate::cloud_provider::helm::{ChartInfo, HelmAction, HelmChartNamespaces};
use crate::cloud_provider::service::{delete_pending_service, Action, Service};
use crate::cloud_provider::service::{Action, Service};
use crate::cloud_provider::DeploymentTarget;
use crate::deployment_action::deploy_helm::HelmDeployment;
use crate::deployment_action::pause_service::PauseServiceAction;
Expand Down Expand Up @@ -80,14 +80,6 @@ where

helm.on_create(target)?;

delete_pending_service(
target.kubernetes.get_kubeconfig_file_path()?.as_str(),
target.environment.namespace(),
self.kube_label_selector().as_str(),
target.kubernetes.cloud_provider().credentials_environment_variables(),
event_details,
)?;

Ok(())
};

Expand Down
Loading

0 comments on commit c7906f5

Please sign in to comment.