rename spec variables
nojnhuh committed Nov 3, 2023
1 parent 5411550 commit ed6e3f9
Showing 2 changed files with 73 additions and 75 deletions.
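Note: both files apply the same mechanical rename. The short-lived local alias of the ASO resource's Spec (`spec := &agentPool.Spec` / `spec := &managedCluster.Spec`) is dropped and fields are assigned directly as `agentPool.Spec.<Field>` / `managedCluster.Spec.<Field>`, and the loop variable `spec` in the agent-pool loop is renamed to `agentPoolSpec` so the name is not reused. A minimal, self-contained sketch of the pattern, using hypothetical types and names rather than the real CAPZ/ASO ones:

```go
package main

import "fmt"

// Hypothetical stand-ins for the ASO agent pool resource; the real types live
// in azure-service-operator's asocontainerservicev1 package.
type PoolSpec struct {
	AzureName string
	Count     *int
}

type PoolStatus struct {
	Count *int
}

type Pool struct {
	Spec   PoolSpec
	Status PoolStatus
}

// ptrTo mirrors k8s.io/utils/ptr.To so the example stays self-contained.
func ptrTo[T any](v T) *T { return &v }

func main() {
	pool := &Pool{}

	// Before: mutate through a short-lived local alias of the spec.
	//   spec := &pool.Spec
	//   spec.AzureName = "pool0"
	//   spec.Count = ptrTo(3)

	// After (the pattern this commit applies): write the fields directly on the
	// resource, so each assignment reads as pool.Spec.<Field> right next to any
	// pool.Status reads later in the same function.
	pool.Spec.AzureName = "pool0"
	pool.Spec.Count = ptrTo(3)

	fmt.Println(pool.Spec.AzureName, *pool.Spec.Count)
}
```

Writing through the resource variable keeps each assignment visibly tied to the same object whose Status is read further down the function.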
65 changes: 32 additions & 33 deletions azure/services/agentpools/spec.go
@@ -174,36 +174,35 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing *asocontainerse
 agentPool = &asocontainerservicev1.ManagedClustersAgentPool{}
 }

-spec := &agentPool.Spec
-spec.AzureName = s.AzureName
-spec.Owner = &genruntime.KnownResourceReference{
+agentPool.Spec.AzureName = s.AzureName
+agentPool.Spec.Owner = &genruntime.KnownResourceReference{
 Name: s.Cluster,
 }
-spec.AvailabilityZones = s.AvailabilityZones
-spec.Count = &s.Replicas
-spec.EnableAutoScaling = ptr.To(s.EnableAutoScaling)
-spec.EnableUltraSSD = s.EnableUltraSSD
-spec.KubeletDiskType = azure.AliasOrNil[asocontainerservicev1.KubeletDiskType]((*string)(s.KubeletDiskType))
-spec.MaxCount = s.MaxCount
-spec.MaxPods = s.MaxPods
-spec.MinCount = s.MinCount
-spec.Mode = ptr.To(asocontainerservicev1.AgentPoolMode(s.Mode))
-spec.NodeLabels = s.NodeLabels
-spec.NodeTaints = s.NodeTaints
-spec.OrchestratorVersion = s.Version
-spec.OsDiskSizeGB = ptr.To(asocontainerservicev1.ContainerServiceOSDisk(s.OSDiskSizeGB))
-spec.OsDiskType = azure.AliasOrNil[asocontainerservicev1.OSDiskType](s.OsDiskType)
-spec.OsType = azure.AliasOrNil[asocontainerservicev1.OSType](s.OSType)
-spec.ScaleSetPriority = azure.AliasOrNil[asocontainerservicev1.ScaleSetPriority](s.ScaleSetPriority)
-spec.ScaleDownMode = azure.AliasOrNil[asocontainerservicev1.ScaleDownMode](s.ScaleDownMode)
-spec.Type = ptr.To(asocontainerservicev1.AgentPoolType_VirtualMachineScaleSets)
-spec.EnableNodePublicIP = s.EnableNodePublicIP
-spec.Tags = s.AdditionalTags
-spec.EnableFIPS = s.EnableFIPS
-spec.EnableEncryptionAtHost = s.EnableEncryptionAtHost
+agentPool.Spec.AvailabilityZones = s.AvailabilityZones
+agentPool.Spec.Count = &s.Replicas
+agentPool.Spec.EnableAutoScaling = ptr.To(s.EnableAutoScaling)
+agentPool.Spec.EnableUltraSSD = s.EnableUltraSSD
+agentPool.Spec.KubeletDiskType = azure.AliasOrNil[asocontainerservicev1.KubeletDiskType]((*string)(s.KubeletDiskType))
+agentPool.Spec.MaxCount = s.MaxCount
+agentPool.Spec.MaxPods = s.MaxPods
+agentPool.Spec.MinCount = s.MinCount
+agentPool.Spec.Mode = ptr.To(asocontainerservicev1.AgentPoolMode(s.Mode))
+agentPool.Spec.NodeLabels = s.NodeLabels
+agentPool.Spec.NodeTaints = s.NodeTaints
+agentPool.Spec.OrchestratorVersion = s.Version
+agentPool.Spec.OsDiskSizeGB = ptr.To(asocontainerservicev1.ContainerServiceOSDisk(s.OSDiskSizeGB))
+agentPool.Spec.OsDiskType = azure.AliasOrNil[asocontainerservicev1.OSDiskType](s.OsDiskType)
+agentPool.Spec.OsType = azure.AliasOrNil[asocontainerservicev1.OSType](s.OSType)
+agentPool.Spec.ScaleSetPriority = azure.AliasOrNil[asocontainerservicev1.ScaleSetPriority](s.ScaleSetPriority)
+agentPool.Spec.ScaleDownMode = azure.AliasOrNil[asocontainerservicev1.ScaleDownMode](s.ScaleDownMode)
+agentPool.Spec.Type = ptr.To(asocontainerservicev1.AgentPoolType_VirtualMachineScaleSets)
+agentPool.Spec.EnableNodePublicIP = s.EnableNodePublicIP
+agentPool.Spec.Tags = s.AdditionalTags
+agentPool.Spec.EnableFIPS = s.EnableFIPS
+agentPool.Spec.EnableEncryptionAtHost = s.EnableEncryptionAtHost

 if s.KubeletConfig != nil {
-spec.KubeletConfig = &asocontainerservicev1.KubeletConfig{
+agentPool.Spec.KubeletConfig = &asocontainerservicev1.KubeletConfig{
 CpuManagerPolicy: s.KubeletConfig.CPUManagerPolicy,
 CpuCfsQuota: s.KubeletConfig.CPUCfsQuota,
 CpuCfsQuotaPeriod: s.KubeletConfig.CPUCfsQuotaPeriod,
@@ -219,33 +218,33 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing *asocontainerse
 }

 if s.SKU != "" {
-spec.VmSize = &s.SKU
+agentPool.Spec.VmSize = &s.SKU
 }

 if s.SpotMaxPrice != nil {
-spec.SpotMaxPrice = ptr.To(s.SpotMaxPrice.AsApproximateFloat64())
+agentPool.Spec.SpotMaxPrice = ptr.To(s.SpotMaxPrice.AsApproximateFloat64())
 }

 if s.VnetSubnetID != "" {
-spec.VnetSubnetReference = &genruntime.ResourceReference{
+agentPool.Spec.VnetSubnetReference = &genruntime.ResourceReference{
 ARMID: s.VnetSubnetID,
 }
 }

 if s.NodePublicIPPrefixID != "" {
-spec.NodePublicIPPrefixReference = &genruntime.ResourceReference{
+agentPool.Spec.NodePublicIPPrefixReference = &genruntime.ResourceReference{
 ARMID: s.NodePublicIPPrefixID,
 }
 }

 if s.LinuxOSConfig != nil {
-spec.LinuxOSConfig = &asocontainerservicev1.LinuxOSConfig{
+agentPool.Spec.LinuxOSConfig = &asocontainerservicev1.LinuxOSConfig{
 SwapFileSizeMB: s.LinuxOSConfig.SwapFileSizeMB,
 TransparentHugePageEnabled: (*string)(s.LinuxOSConfig.TransparentHugePageEnabled),
 TransparentHugePageDefrag: (*string)(s.LinuxOSConfig.TransparentHugePageDefrag),
 }
 if s.LinuxOSConfig.Sysctls != nil {
-spec.LinuxOSConfig.Sysctls = &asocontainerservicev1.SysctlConfig{
+agentPool.Spec.LinuxOSConfig.Sysctls = &asocontainerservicev1.SysctlConfig{
 FsAioMaxNr: s.LinuxOSConfig.Sysctls.FsAioMaxNr,
 FsFileMax: s.LinuxOSConfig.Sysctls.FsFileMax,
 FsInotifyMaxUserWatches: s.LinuxOSConfig.Sysctls.FsInotifyMaxUserWatches,
@@ -282,7 +281,7 @@ func (s *AgentPoolSpec) Parameters(ctx context.Context, existing *asocontainerse
 // count present in MachinePool or AzureManagedMachinePool, hence we should not make an update API call based
 // on difference in count.
 if s.EnableAutoScaling && agentPool.Status.Count != nil {
-spec.Count = agentPool.Status.Count
+agentPool.Spec.Count = agentPool.Status.Count
 }

 return agentPool, nil
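Worth noting from the unchanged context at the end of this file's diff: when autoscaling is enabled and the resource's Status already reports a node count, the desired Count is overwritten with that observed value, so drift between the CAPI replica count and the autoscaler-managed count does not by itself trigger an update call. A rough, self-contained sketch of that guard with simplified stand-in types (the real code operates on the ASO ManagedClustersAgentPool and its Spec/Status fields):

```go
package main

import "fmt"

// agentPool is a simplified stand-in for the ASO ManagedClustersAgentPool.
type agentPool struct {
	spec struct {
		EnableAutoScaling bool
		Count             *int
	}
	status struct {
		Count *int // node count currently reported in Status (autoscaler-managed)
	}
}

// reconcileCount mirrors the shape of the guard at the end of Parameters: when
// autoscaling owns the node count, prefer the observed Status count over the
// desired replica count so no update is made based on that difference alone.
func reconcileCount(pool *agentPool, desiredReplicas int) {
	pool.spec.Count = &desiredReplicas
	if pool.spec.EnableAutoScaling && pool.status.Count != nil {
		pool.spec.Count = pool.status.Count
	}
}

func main() {
	pool := &agentPool{}
	pool.spec.EnableAutoScaling = true
	observed := 5
	pool.status.Count = &observed

	reconcileCount(pool, 3)       // CAPI still asks for 3 replicas
	fmt.Println(*pool.spec.Count) // prints 5: the autoscaler's count wins
}
```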
83 changes: 41 additions & 42 deletions azure/services/managedclusters/spec.go
@@ -313,32 +313,31 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 }
 }

-spec := &managedCluster.Spec
-spec.AzureName = s.Name
-spec.Owner = &genruntime.KnownResourceReference{
+managedCluster.Spec.AzureName = s.Name
+managedCluster.Spec.Owner = &genruntime.KnownResourceReference{
 Name: s.ResourceGroup,
 }
-spec.Identity = &asocontainerservicev1.ManagedClusterIdentity{
+managedCluster.Spec.Identity = &asocontainerservicev1.ManagedClusterIdentity{
 Type: ptr.To(asocontainerservicev1.ManagedClusterIdentity_Type_SystemAssigned),
 }
-spec.Location = &s.Location
-spec.NodeResourceGroup = &s.NodeResourceGroup
-spec.EnableRBAC = ptr.To(true)
-spec.DnsPrefix = s.DNSPrefix
-spec.KubernetesVersion = &s.Version
-spec.ServicePrincipalProfile = &asocontainerservicev1.ManagedClusterServicePrincipalProfile{
+managedCluster.Spec.Location = &s.Location
+managedCluster.Spec.NodeResourceGroup = &s.NodeResourceGroup
+managedCluster.Spec.EnableRBAC = ptr.To(true)
+managedCluster.Spec.DnsPrefix = s.DNSPrefix
+managedCluster.Spec.KubernetesVersion = &s.Version
+managedCluster.Spec.ServicePrincipalProfile = &asocontainerservicev1.ManagedClusterServicePrincipalProfile{
 ClientId: ptr.To("msi"),
 }
-spec.NetworkProfile = &asocontainerservicev1.ContainerServiceNetworkProfile{
+managedCluster.Spec.NetworkProfile = &asocontainerservicev1.ContainerServiceNetworkProfile{
 NetworkPlugin: azure.AliasOrNil[asocontainerservicev1.ContainerServiceNetworkProfile_NetworkPlugin](&s.NetworkPlugin),
 LoadBalancerSku: azure.AliasOrNil[asocontainerservicev1.ContainerServiceNetworkProfile_LoadBalancerSku](&s.LoadBalancerSKU),
 NetworkPolicy: azure.AliasOrNil[asocontainerservicev1.ContainerServiceNetworkProfile_NetworkPolicy](&s.NetworkPolicy),
 }
-spec.AutoScalerProfile = buildAutoScalerProfile(s.AutoScalerProfile)
+managedCluster.Spec.AutoScalerProfile = buildAutoScalerProfile(s.AutoScalerProfile)

 // OperatorSpec defines how the Secrets generated by ASO should look for the AKS cluster kubeconfigs.
 // There is no prescribed naming convention that must be followed.
-spec.OperatorSpec = &asocontainerservicev1.ManagedClusterOperatorSpec{
+managedCluster.Spec.OperatorSpec = &asocontainerservicev1.ManagedClusterOperatorSpec{
 Secrets: &asocontainerservicev1.ManagedClusterOperatorSecrets{
 AdminCredentials: &genruntime.SecretDestination{
 Name: adminKubeconfigSecretName(s.ClusterName),
@@ -355,7 +354,7 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 }
[Codecov (codecov/patch) check warning on line 354 in azure/services/managedclusters/spec.go: added lines #L353 - L354 were not covered by tests]
 }
 if decodedSSHPublicKey != nil {
-spec.LinuxProfile = &asocontainerservicev1.ContainerServiceLinuxProfile{
+managedCluster.Spec.LinuxProfile = &asocontainerservicev1.ContainerServiceLinuxProfile{
 AdminUsername: ptr.To(azure.DefaultAKSUserName),
 Ssh: &asocontainerservicev1.ContainerServiceSshConfiguration{
 PublicKeys: []asocontainerservicev1.ContainerServiceSshPublicKey{
@@ -368,17 +367,17 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 }

 if s.NetworkPluginMode != nil {
-spec.NetworkProfile.NetworkPluginMode = ptr.To(asocontainerservicev1.ContainerServiceNetworkProfile_NetworkPluginMode(*s.NetworkPluginMode))
+managedCluster.Spec.NetworkProfile.NetworkPluginMode = ptr.To(asocontainerservicev1.ContainerServiceNetworkProfile_NetworkPluginMode(*s.NetworkPluginMode))
 }

 if s.PodCIDR != "" {
-spec.NetworkProfile.PodCidr = &s.PodCIDR
+managedCluster.Spec.NetworkProfile.PodCidr = &s.PodCIDR
 }

 if s.ServiceCIDR != "" {
-spec.NetworkProfile.DnsServiceIP = s.DNSServiceIP
+managedCluster.Spec.NetworkProfile.DnsServiceIP = s.DNSServiceIP
 if s.DNSServiceIP == nil {
-spec.NetworkProfile.ServiceCidr = &s.ServiceCIDR
+managedCluster.Spec.NetworkProfile.ServiceCidr = &s.ServiceCIDR
 ip, _, err := net.ParseCIDR(s.ServiceCIDR)
 if err != nil {
 return nil, fmt.Errorf("failed to parse service cidr: %w", err)
@@ -389,35 +388,35 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 // https://golang.org/src/net/ip.go#L48
 ip[15] = byte(10)
 dnsIP := ip.String()
-spec.NetworkProfile.DnsServiceIP = &dnsIP
+managedCluster.Spec.NetworkProfile.DnsServiceIP = &dnsIP
 }
 }

 if s.AADProfile != nil {
-spec.AadProfile = &asocontainerservicev1.ManagedClusterAADProfile{
+managedCluster.Spec.AadProfile = &asocontainerservicev1.ManagedClusterAADProfile{
 Managed: &s.AADProfile.Managed,
 EnableAzureRBAC: &s.AADProfile.EnableAzureRBAC,
 AdminGroupObjectIDs: s.AADProfile.AdminGroupObjectIDs,
 }
 if s.DisableLocalAccounts != nil {
-spec.DisableLocalAccounts = s.DisableLocalAccounts
+managedCluster.Spec.DisableLocalAccounts = s.DisableLocalAccounts
 }

 if ptr.Deref(s.DisableLocalAccounts, false) {
 // admin credentials cannot be fetched when local accounts are disabled
-spec.OperatorSpec.Secrets.AdminCredentials = nil
+managedCluster.Spec.OperatorSpec.Secrets.AdminCredentials = nil
 }
 if s.AADProfile.Managed {
-spec.OperatorSpec.Secrets.UserCredentials = &genruntime.SecretDestination{
+managedCluster.Spec.OperatorSpec.Secrets.UserCredentials = &genruntime.SecretDestination{
 Name: userKubeconfigSecretName(s.ClusterName),
 Key: secret.KubeconfigDataName,
 }
 }
 }

 for i := range s.AddonProfiles {
-if spec.AddonProfiles == nil {
-spec.AddonProfiles = map[string]asocontainerservicev1.ManagedClusterAddonProfile{}
+if managedCluster.Spec.AddonProfiles == nil {
+managedCluster.Spec.AddonProfiles = map[string]asocontainerservicev1.ManagedClusterAddonProfile{}
 }
 item := s.AddonProfiles[i]
 addonProfile := asocontainerservicev1.ManagedClusterAddonProfile{
@@ -426,46 +425,46 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 if item.Config != nil {
 addonProfile.Config = item.Config
 }
-spec.AddonProfiles[item.Name] = addonProfile
+managedCluster.Spec.AddonProfiles[item.Name] = addonProfile
 }

 if s.SKU != nil {
 tierName := asocontainerservicev1.ManagedClusterSKU_Tier(s.SKU.Tier)
-spec.Sku = &asocontainerservicev1.ManagedClusterSKU{
+managedCluster.Spec.Sku = &asocontainerservicev1.ManagedClusterSKU{
 Name: ptr.To(asocontainerservicev1.ManagedClusterSKU_Name("Base")),
 Tier: ptr.To(tierName),
 }
 }

 if s.LoadBalancerProfile != nil {
-spec.NetworkProfile.LoadBalancerProfile = s.GetLoadBalancerProfile()
+managedCluster.Spec.NetworkProfile.LoadBalancerProfile = s.GetLoadBalancerProfile()
 }

 if s.APIServerAccessProfile != nil {
-spec.ApiServerAccessProfile = &asocontainerservicev1.ManagedClusterAPIServerAccessProfile{
+managedCluster.Spec.ApiServerAccessProfile = &asocontainerservicev1.ManagedClusterAPIServerAccessProfile{
 EnablePrivateCluster: s.APIServerAccessProfile.EnablePrivateCluster,
 PrivateDNSZone: s.APIServerAccessProfile.PrivateDNSZone,
 EnablePrivateClusterPublicFQDN: s.APIServerAccessProfile.EnablePrivateClusterPublicFQDN,
 }

 if s.APIServerAccessProfile.AuthorizedIPRanges != nil {
-spec.ApiServerAccessProfile.AuthorizedIPRanges = s.APIServerAccessProfile.AuthorizedIPRanges
+managedCluster.Spec.ApiServerAccessProfile.AuthorizedIPRanges = s.APIServerAccessProfile.AuthorizedIPRanges
 }
 }

 if s.OutboundType != nil {
-spec.NetworkProfile.OutboundType = ptr.To(asocontainerservicev1.ContainerServiceNetworkProfile_OutboundType(*s.OutboundType))
+managedCluster.Spec.NetworkProfile.OutboundType = ptr.To(asocontainerservicev1.ContainerServiceNetworkProfile_OutboundType(*s.OutboundType))
 }

 if s.Identity != nil {
-spec.Identity, err = getIdentity(s.Identity)
+managedCluster.Spec.Identity, err = getIdentity(s.Identity)
 if err != nil {
 return nil, errors.Wrapf(err, "Identity is not valid: %s", err)
 }
 }

 if s.KubeletUserAssignedIdentity != "" {
-spec.IdentityProfile = map[string]asocontainerservicev1.UserAssignedIdentity{
+managedCluster.Spec.IdentityProfile = map[string]asocontainerservicev1.UserAssignedIdentity{
 kubeletIdentityKey: {
 ResourceReference: &genruntime.ResourceReference{
 ARMID: s.KubeletUserAssignedIdentity,
@@ -475,40 +474,40 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 }

 if s.HTTPProxyConfig != nil {
-spec.HttpProxyConfig = &asocontainerservicev1.ManagedClusterHTTPProxyConfig{
+managedCluster.Spec.HttpProxyConfig = &asocontainerservicev1.ManagedClusterHTTPProxyConfig{
 HttpProxy: s.HTTPProxyConfig.HTTPProxy,
 HttpsProxy: s.HTTPProxyConfig.HTTPSProxy,
 TrustedCa: s.HTTPProxyConfig.TrustedCA,
 }

 if s.HTTPProxyConfig.NoProxy != nil {
-spec.HttpProxyConfig.NoProxy = s.HTTPProxyConfig.NoProxy
+managedCluster.Spec.HttpProxyConfig.NoProxy = s.HTTPProxyConfig.NoProxy
 }
 }

 if s.OIDCIssuerProfile != nil {
-spec.OidcIssuerProfile = &asocontainerservicev1.ManagedClusterOIDCIssuerProfile{
+managedCluster.Spec.OidcIssuerProfile = &asocontainerservicev1.ManagedClusterOIDCIssuerProfile{
 Enabled: s.OIDCIssuerProfile.Enabled,
 }
 }

 // Only include AgentPoolProfiles during initial cluster creation. Agent pools are managed solely by the
 // AzureManagedMachinePool controller thereafter.
-spec.AgentPoolProfiles = nil
+managedCluster.Spec.AgentPoolProfiles = nil
 if managedCluster.Status.AgentPoolProfiles == nil {
 // Add all agent pools to cluster spec that will be submitted to the API
 agentPoolSpecs, err := s.GetAllAgentPools()
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get agent pool specs for managed cluster %s", s.Name)
 }

-for _, spec := range agentPoolSpecs {
-agentPool, err := spec.Parameters(ctx, nil)
+for _, agentPoolSpec := range agentPoolSpecs {
+agentPool, err := agentPoolSpec.Parameters(ctx, nil)
 if err != nil {
 return nil, errors.Wrapf(err, "failed to get agent pool parameters for managed cluster %s", s.Name)
 }
-agentPoolSpec := spec.(*agentpools.AgentPoolSpec)
-agentPool.Spec.AzureName = agentPoolSpec.AzureName
+agentPoolSpecTyped := agentPoolSpec.(*agentpools.AgentPoolSpec)
+agentPool.Spec.AzureName = agentPoolSpecTyped.AzureName
 profile := converters.AgentPoolToManagedClusterAgentPoolProfile(agentPool)
 managedCluster.Spec.AgentPoolProfiles = append(managedCluster.Spec.AgentPoolProfiles, profile)
 }
@@ -517,7 +516,7 @@ func (s *ManagedClusterSpec) Parameters(ctx context.Context, existing *asocontai
 if managedCluster.Status.Tags != nil {
 // tags managed separately because updating tags concurrently with agent pools' can cause the cluster
 // to get stuck in an "Updating" state forever.
-spec.Tags = nil
+managedCluster.Spec.Tags = nil
 }

 return managedCluster, nil
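One piece of unchanged context in this file is easy to miss: when a service CIDR is configured but no DNS service IP is given, the code derives one by parsing the CIDR and setting the last byte of the 16-byte IP returned by net.ParseCIDR to 10, i.e. the conventional .10 host address in the service range. A standalone sketch of that derivation (simplified; the real code runs inside Parameters and assigns the result to the network profile):

```go
package main

import (
	"fmt"
	"net"
)

// defaultDNSServiceIP mirrors the derivation kept as context in this diff:
// take the network address of the service CIDR and use its ".10" host address
// as the cluster DNS service IP. net.ParseCIDR returns an IPv4 address in its
// 16-byte form, so the last byte is index 15 (see https://golang.org/src/net/ip.go#L48).
func defaultDNSServiceIP(serviceCIDR string) (string, error) {
	ip, _, err := net.ParseCIDR(serviceCIDR)
	if err != nil {
		return "", fmt.Errorf("failed to parse service cidr: %w", err)
	}
	ip[15] = byte(10)
	return ip.String(), nil
}

func main() {
	dnsIP, err := defaultDNSServiceIP("10.96.0.0/12")
	if err != nil {
		panic(err)
	}
	fmt.Println(dnsIP) // 10.96.0.10
}
```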
