diff --git a/.github/workflows/build-x86-image.yaml b/.github/workflows/build-x86-image.yaml index 7bfceab7b91..5190fc0cdce 100644 --- a/.github/workflows/build-x86-image.yaml +++ b/.github/workflows/build-x86-image.yaml @@ -830,7 +830,7 @@ jobs: - build-kube-ovn - build-e2e-binaries runs-on: ubuntu-22.04 - timeout-minutes: 25 + timeout-minutes: 30 strategy: fail-fast: false matrix: diff --git a/charts/templates/kube-ovn-crd.yaml b/charts/templates/kube-ovn-crd.yaml index eec396c6e9b..1e0499707c7 100644 --- a/charts/templates/kube-ovn-crd.yaml +++ b/charts/templates/kube-ovn-crd.yaml @@ -1767,6 +1767,122 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + name: ippools.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Subnet + type: string + jsonPath: .spec.subnet + - name: Protocol + type: string + jsonPath: .spec.protocol + - name: IPs + type: string + jsonPath: .spec.ips + - name: V4Used + type: number + jsonPath: .status.v4UsingIPs + - name: V4Available + type: number + jsonPath: .status.v4AvailableIPs + - name: V6Used + type: number + jsonPath: .status.v6UsingIPs + - name: V6Available + type: number + jsonPath: .status.v6AvailableIPs + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + subnet: + type: string + x-kubernetes-validations: + - rule: "self == oldSelf" + message: "This field is immutable." 
+ namespaces: + type: array + x-kubernetes-list-type: set + items: + type: string + protocol: + type: string + enum: + - IPv4 + - IPv6 + - Dual + ips: + type: array + minItems: 1 + x-kubernetes-list-type: set + items: + type: string + anyOf: + - format: ipv4 + - format: ipv6 + - format: cidr + - pattern: ^(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.\.(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])$ + - pattern: ^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))\.\.((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))$ + required: + - subnet + - ips + status: + type: object + properties: + v4AvailableIPs: + type: number + v4UsingIPs: + type: number + v6AvailableIPs: + type: number + v6UsingIPs: + type: number + v4AvailableIPRange: + type: string + v4UsingIPRange: + type: string + v6AvailableIPRange: + type: string + v6UsingIPRange: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + scope: Cluster + names: + plural: ippools + singular: ippool + kind: IPPool + shortNames: + - ippool +--- +apiVersion: apiextensions.k8s.io/v1 
+kind: CustomResourceDefinition metadata: name: vlans.kubeovn.io spec: diff --git a/charts/templates/ovn-CR.yaml b/charts/templates/ovn-CR.yaml index 0881987d715..2aa39184d54 100644 --- a/charts/templates/ovn-CR.yaml +++ b/charts/templates/ovn-CR.yaml @@ -15,6 +15,8 @@ rules: - vpc-nat-gateways/status - subnets - subnets/status + - ippools + - ippools/status - ips - vips - vips/status diff --git a/dist/images/cleanup.sh b/dist/images/cleanup.sh index cda3c527efc..40328ed4adf 100644 --- a/dist/images/cleanup.sh +++ b/dist/images/cleanup.sh @@ -54,6 +54,10 @@ for slr in $(kubectl get switch-lb-rule -o name); do kubectl delete --ignore-not-found $slr done +for ippool in $(kubectl get ippool -o name); do + kubectl delete --ignore-not-found $ippool +done + set +e for subnet in $(kubectl get subnet -o name); do kubectl patch "$subnet" --type='json' -p '[{"op": "replace", "path": "/metadata/finalizers", "value": []}]' @@ -108,12 +112,27 @@ kubectl delete --ignore-not-found clusterrolebinding vpc-dns kubectl delete --ignore-not-found sa vpc-dns -n kube-system # delete CRD -kubectl delete --ignore-not-found crd htbqoses.kubeovn.io security-groups.kubeovn.io ips.kubeovn.io subnets.kubeovn.io \ - vpc-nat-gateways.kubeovn.io vpcs.kubeovn.io vlans.kubeovn.io provider-networks.kubeovn.io \ - iptables-dnat-rules.kubeovn.io iptables-eips.kubeovn.io iptables-fip-rules.kubeovn.io \ - iptables-snat-rules.kubeovn.io vips.kubeovn.io switch-lb-rules.kubeovn.io vpc-dnses.kubeovn.io \ - ovn-eips.kubeovn.io ovn-fips.kubeovn.io ovn-snat-rules.kubeovn.io ovn-dnat-rules.kubeovn.io \ - qos-policies.kubeovn.io +kubectl delete --ignore-not-found crd \ + htbqoses.kubeovn.io \ + security-groups.kubeovn.io \ + ips.kubeovn.io \ + ippools.kubeovn.io \ + subnets.kubeovn.io \ + vpc-nat-gateways.kubeovn.io \ + vpcs.kubeovn.io \ + vlans.kubeovn.io \ + provider-networks.kubeovn.io \ + iptables-dnat-rules.kubeovn.io \ + iptables-eips.kubeovn.io \ + iptables-fip-rules.kubeovn.io \ + 
iptables-snat-rules.kubeovn.io \ + vips.kubeovn.io \ + switch-lb-rules.kubeovn.io \ + vpc-dnses.kubeovn.io \ + ovn-eips.kubeovn.io ovn-fips.kubeovn.io \ + ovn-snat-rules.kubeovn.io \ + ovn-dnat-rules.kubeovn.io \ + qos-policies.kubeovn.io # Remove annotations/labels in namespaces and nodes kubectl annotate no --all ovn.kubernetes.io/cidr- diff --git a/dist/images/install.sh b/dist/images/install.sh index 434c7aafb73..177cfb7fce5 100755 --- a/dist/images/install.sh +++ b/dist/images/install.sh @@ -2305,6 +2305,122 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + name: ippools.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Subnet + type: string + jsonPath: .spec.subnet + - name: Protocol + type: string + jsonPath: .spec.protocol + - name: IPs + type: string + jsonPath: .spec.ips + - name: V4Used + type: number + jsonPath: .status.v4UsingIPs + - name: V4Available + type: number + jsonPath: .status.v4AvailableIPs + - name: V6Used + type: number + jsonPath: .status.v6UsingIPs + - name: V6Available + type: number + jsonPath: .status.v6AvailableIPs + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + subnet: + type: string + x-kubernetes-validations: + - rule: "self == oldSelf" + message: "This field is immutable." 
+ namespaces: + type: array + x-kubernetes-list-type: set + items: + type: string + protocol: + type: string + enum: + - IPv4 + - IPv6 + - Dual + ips: + type: array + minItems: 1 + x-kubernetes-list-type: set + items: + type: string + anyOf: + - format: ipv4 + - format: ipv6 + - format: cidr + - pattern: ^(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.\.(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])$ + - pattern: ^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))\.\.((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))$ + required: + - subnet + - ips + status: + type: object + properties: + v4AvailableIPs: + type: number + v4UsingIPs: + type: number + v6AvailableIPs: + type: number + v6UsingIPs: + type: number + v4AvailableIPRange: + type: string + v4UsingIPRange: + type: string + v6AvailableIPRange: + type: string + v6UsingIPRange: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + scope: Cluster + names: + plural: ippools + singular: ippool + kind: IPPool + shortNames: + - ippool +--- +apiVersion: apiextensions.k8s.io/v1 
+kind: CustomResourceDefinition metadata: name: vlans.kubeovn.io spec: @@ -2742,6 +2858,8 @@ rules: - vpc-nat-gateways/status - subnets - subnets/status + - ippools + - ippools/status + - ips - vips - vips/status diff --git a/pkg/apis/kubeovn/v1/condition.go b/pkg/apis/kubeovn/v1/condition.go index 751cd0c19a0..4c67bd86609 100644 --- a/pkg/apis/kubeovn/v1/condition.go +++ b/pkg/apis/kubeovn/v1/condition.go @@ -160,6 +160,110 @@ func (m *SubnetStatus) ClearAllConditions() { } } +func (m *IPPoolStatus) addCondition(ctype ConditionType, status corev1.ConditionStatus, reason, message string) { + now := metav1.Now() + c := &IPPoolCondition{ + Type: ctype, + LastUpdateTime: now, + LastTransitionTime: now, + Status: status, + Reason: reason, + Message: message, + } + m.Conditions = append(m.Conditions, *c) +} + +// setConditionValue updates or creates a new condition +func (m *IPPoolStatus) setConditionValue(ctype ConditionType, status corev1.ConditionStatus, reason, message string) { + var c *IPPoolCondition + for i := range m.Conditions { + if m.Conditions[i].Type == ctype { + c = &m.Conditions[i] + } + } + if c == nil { + m.addCondition(ctype, status, reason, message) + } else { + // update the matched condition in place; skip when status, reason and message are all unchanged
+ if c.Status == status && c.Reason == reason && c.Message == message { + return + } + now := metav1.Now() + c.LastUpdateTime = now + if c.Status != status { + c.LastTransitionTime = now + } + c.Status = status + c.Reason = reason + c.Message = message + } +} + +// GetCondition get existing condition +func (m *IPPoolStatus) GetCondition(ctype ConditionType) *IPPoolCondition { + for i := range m.Conditions { + if m.Conditions[i].Type == ctype { + return &m.Conditions[i] + } + } + return nil +} + +// EnsureCondition useful for adding default conditions +func (m *IPPoolStatus) EnsureCondition(ctype ConditionType) { + if c := m.GetCondition(ctype); c != nil { + return + } + m.addCondition(ctype, corev1.ConditionUnknown, ReasonInit, "Not Observed") +} + +// EnsureStandardConditions - helper to inject standard conditions +func (m *IPPoolStatus) EnsureStandardConditions() { + m.EnsureCondition(Ready) + m.EnsureCondition(Error) +} + +// SetCondition updates or creates a new condition +func (m *IPPoolStatus) SetCondition(ctype ConditionType, reason, message string) { + m.setConditionValue(ctype, corev1.ConditionTrue, reason, message) +} + +// ClearCondition updates or creates a new condition +func (m *IPPoolStatus) ClearCondition(ctype ConditionType, reason, message string) { + m.setConditionValue(ctype, corev1.ConditionFalse, reason, message) +} + +// Ready - shortcut to set ready condition to true +func (m *IPPoolStatus) Ready(reason, message string) { + m.SetCondition(Ready, reason, message) +} + +// NotReady - shortcut to set ready condition to false +func (m *IPPoolStatus) NotReady(reason, message string) { + m.ClearCondition(Ready, reason, message) +} + +// SetError - shortcut to set error condition +func (m *IPPoolStatus) SetError(reason, message string) { + m.SetCondition(Error, reason, message) +} + +// ClearError - shortcut to clear the error condition +func (m *IPPoolStatus) ClearError() { + m.ClearCondition(Error, "NoError", "No error seen") +} + +// IsConditionTrue - 
if condition is true +func (m *IPPoolStatus) IsConditionTrue(ctype ConditionType) bool { + if c := m.GetCondition(ctype); c != nil { + return c.Status == corev1.ConditionTrue + } + return false +} + +// IsReady returns true if ready condition is set +func (m *IPPoolStatus) IsReady() bool { return m.IsConditionTrue(Ready) } + // SetVlanError - shortcut to set error condition func (v *VlanStatus) SetVlanError(reason, message string) { v.SetVlanCondition(Error, reason, message) @@ -211,13 +315,15 @@ func (v *VlanStatus) addVlanCondition(ctype ConditionType, status corev1.Conditi func (s *ProviderNetworkStatus) addNodeCondition(node string, ctype ConditionType, status corev1.ConditionStatus, reason, message string) { now := metav1.Now() c := &ProviderNetworkCondition{ - Node: node, - Type: ctype, - LastUpdateTime: now, - LastTransitionTime: now, - Status: status, - Reason: reason, - Message: message, + Node: node, + Condition: Condition{ + Type: ctype, + LastUpdateTime: now, + LastTransitionTime: now, + Status: status, + Reason: reason, + Message: message, + }, } s.Conditions = append(s.Conditions, *c) } diff --git a/pkg/apis/kubeovn/v1/register.go b/pkg/apis/kubeovn/v1/register.go index 56ad0a685c6..d974c1b3903 100644 --- a/pkg/apis/kubeovn/v1/register.go +++ b/pkg/apis/kubeovn/v1/register.go @@ -33,6 +33,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &IP{}, &IPList{}, + &IPPool{}, + &IPPoolList{}, &Subnet{}, &SubnetList{}, &Vlan{}, diff --git a/pkg/apis/kubeovn/v1/status.go b/pkg/apis/kubeovn/v1/status.go index 28f87ff69bf..90241aa74cd 100644 --- a/pkg/apis/kubeovn/v1/status.go +++ b/pkg/apis/kubeovn/v1/status.go @@ -7,6 +7,16 @@ import ( "k8s.io/klog/v2" ) +func (s *IPPoolStatus) Bytes() ([]byte, error) { + bytes, err := json.Marshal(s) + if err != nil { + return nil, err + } + newStr := fmt.Sprintf(`{"status": %s}`, string(bytes)) + klog.V(5).Info("status body", newStr) + return []byte(newStr), nil +} + func (ss 
*SubnetStatus) Bytes() ([]byte, error) { // {"availableIPs":65527,"usingIPs":9} => {"status": {"availableIPs":65527,"usingIPs":9}} bytes, err := json.Marshal(ss) diff --git a/pkg/apis/kubeovn/v1/types.go b/pkg/apis/kubeovn/v1/types.go index b31393392f1..27feb7ba09a 100644 --- a/pkg/apis/kubeovn/v1/types.go +++ b/pkg/apis/kubeovn/v1/types.go @@ -194,9 +194,9 @@ type NatOutGoingPolicyMatch struct { // ConditionType encodes information on the condition type ConditionType string -// SubnetCondition describes the state of an object at a certain point. +// Condition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type SubnetCondition struct { +type Condition struct { // Type of condition. Type ConditionType `json:"type"` // Status of the condition, one of True, False, Unknown. @@ -215,6 +215,10 @@ type SubnetCondition struct { LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` } +// SubnetCondition describes the state of an object at a certain point. +// +k8s:deepcopy-gen=true +type SubnetCondition Condition + type SubnetStatus struct { // Conditions represents the latest state of the object // +optional @@ -251,6 +255,53 @@ type SubnetList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +genclient:nonNamespaced +type IPPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IPPoolSpec `json:"spec"` + Status IPPoolStatus `json:"status,omitempty"` +} + +type IPPoolSpec struct { + Subnet string `json:"subnet,omitempty"` + Protocol string `json:"protocol,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` + IPs []string `json:"ips,omitempty"` +} + +// IPPoolCondition describes the state of an object at a certain point. 
+// +k8s:deepcopy-gen=true +type IPPoolCondition Condition + +type IPPoolStatus struct { + V4AvailableIPs float64 `json:"v4AvailableIPs"` + V4AvailableIPRange string `json:"v4AvailableIPRange"` + V4UsingIPs float64 `json:"v4UsingIPs"` + V4UsingIPRange string `json:"v4UsingIPRange"` + V6AvailableIPs float64 `json:"v6AvailableIPs"` + V6AvailableIPRange string `json:"v6AvailableIPRange"` + V6UsingIPs float64 `json:"v6UsingIPs"` + V6UsingIPRange string `json:"v6UsingIPRange"` + + // Conditions represents the latest state of the object + // +optional + Conditions []IPPoolCondition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IPPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []IPPool `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + type Vlan struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -280,24 +331,7 @@ type VlanStatus struct { // VlanCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type VlanCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type VlanCondition Condition // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -355,22 +389,7 @@ type ProviderNetworkStatus struct { type ProviderNetworkCondition struct { // Node name Node string `json:"node"` - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Condition } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -466,24 +485,7 @@ type VpcStatus struct { // VpcCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type VpcCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type VpcCondition Condition // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -549,24 +551,7 @@ type IptablesEipSpec struct { // IptablesEIPCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type IptablesEIPCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type IptablesEIPCondition Condition type IptablesEipStatus struct { // +optional @@ -611,24 +596,7 @@ type IptablesFIPRuleSpec struct { // IptablesFIPRuleCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type IptablesFIPRuleCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type IptablesFIPRuleCondition Condition type IptablesFIPRuleStatus struct { // +optional @@ -675,24 +643,7 @@ type IptablesSnatRuleSpec struct { // IptablesSnatRuleCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type IptablesSnatRuleCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type IptablesSnatRuleCondition Condition type IptablesSnatRuleStatus struct { // +optional @@ -742,24 +693,7 @@ type IptablesDnatRuleSpec struct { // IptablesDnatRuleCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type IptablesDnatRuleCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type IptablesDnatRuleCondition Condition type IptablesDnatRuleStatus struct { // +optional @@ -876,24 +810,7 @@ type VipSpec struct { // VipCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type VipCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type VipCondition Condition type VipStatus struct { // Conditions represents the latest state of the object @@ -959,24 +876,7 @@ type VpcDnsStatus struct { // VpcDnsCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type VpcDnsCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type VpcDnsCondition Condition type SlrPort struct { Name string `json:"name"` @@ -1029,24 +929,7 @@ type SwitchLBRuleList struct { // SwitchLBRuleCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type SwitchLBRuleCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type SwitchLBRuleCondition Condition // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -1071,24 +954,7 @@ type OvnEipSpec struct { // OvnEipCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type OvnEipCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type OvnEipCondition Condition type OvnEipStatus struct { // Conditions represents the latest state of the object @@ -1133,24 +999,7 @@ type OvnFipSpec struct { // OvnFipCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type OvnFipCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type OvnFipCondition Condition type OvnFipStatus struct { // +optional @@ -1198,24 +1047,7 @@ type OvnSnatRuleSpec struct { // OvnSnatRuleCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type OvnSnatRuleCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type OvnSnatRuleCondition Condition type OvnSnatRuleStatus struct { // +optional @@ -1265,24 +1097,7 @@ type OvnDnatRuleSpec struct { // OvnDnatRuleCondition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type OvnDnatRuleCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type OvnDnatRuleCondition Condition // +k8s:deepcopy-gen=true type OvnDnatRuleStatus struct { @@ -1334,24 +1149,7 @@ type QoSPolicySpec struct { // Condition describes the state of an object at a certain point. // +k8s:deepcopy-gen=true -type QoSPolicyCondition struct { - // Type of condition. - Type ConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty"` - // Last time the condition was probed - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` -} +type QoSPolicyCondition Condition // BandwidthLimitRule describes the rule of an bandwidth limit. type QoSPolicyBandwidthLimitRule struct { diff --git a/pkg/apis/kubeovn/v1/zz_generated.deepcopy.go b/pkg/apis/kubeovn/v1/zz_generated.deepcopy.go index 571bf15971c..d00e2c8c08c 100644 --- a/pkg/apis/kubeovn/v1/zz_generated.deepcopy.go +++ b/pkg/apis/kubeovn/v1/zz_generated.deepcopy.go @@ -42,6 +42,24 @@ func (in *Acl) DeepCopy() *Acl { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomInterface) DeepCopyInto(out *CustomInterface) { *out = *in @@ -123,6 +141,134 @@ func (in *IPList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPool) DeepCopyInto(out *IPPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPool. 
+func (in *IPPool) DeepCopy() *IPPool { + if in == nil { + return nil + } + out := new(IPPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolCondition) DeepCopyInto(out *IPPoolCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolCondition. +func (in *IPPoolCondition) DeepCopy() *IPPoolCondition { + if in == nil { + return nil + } + out := new(IPPoolCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolList) DeepCopyInto(out *IPPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolList. +func (in *IPPoolList) DeepCopy() *IPPoolList { + if in == nil { + return nil + } + out := new(IPPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPPoolSpec) DeepCopyInto(out *IPPoolSpec) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IPs != nil { + in, out := &in.IPs, &out.IPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolSpec. +func (in *IPPoolSpec) DeepCopy() *IPPoolSpec { + if in == nil { + return nil + } + out := new(IPPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolStatus) DeepCopyInto(out *IPPoolStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]IPPoolCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolStatus. +func (in *IPPoolStatus) DeepCopy() *IPPoolStatus { + if in == nil { + return nil + } + out := new(IPPoolStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPSpec) DeepCopyInto(out *IPSpec) { *out = *in @@ -626,6 +772,56 @@ func (in *IptablesSnatRuleStatus) DeepCopy() *IptablesSnatRuleStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NatOutGoingPolicyMatch) DeepCopyInto(out *NatOutGoingPolicyMatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NatOutGoingPolicyMatch. 
+func (in *NatOutGoingPolicyMatch) DeepCopy() *NatOutGoingPolicyMatch { + if in == nil { + return nil + } + out := new(NatOutGoingPolicyMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NatOutgoingPolicyRule) DeepCopyInto(out *NatOutgoingPolicyRule) { + *out = *in + out.Match = in.Match + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NatOutgoingPolicyRule. +func (in *NatOutgoingPolicyRule) DeepCopy() *NatOutgoingPolicyRule { + if in == nil { + return nil + } + out := new(NatOutgoingPolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NatOutgoingPolicyRuleStatus) DeepCopyInto(out *NatOutgoingPolicyRuleStatus) { + *out = *in + out.NatOutgoingPolicyRule = in.NatOutgoingPolicyRule + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NatOutgoingPolicyRuleStatus. +func (in *NatOutgoingPolicyRuleStatus) DeepCopy() *NatOutgoingPolicyRuleStatus { + if in == nil { + return nil + } + out := new(NatOutgoingPolicyRuleStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OvnDnatRule) DeepCopyInto(out *OvnDnatRule) { *out = *in @@ -1145,8 +1341,7 @@ func (in *ProviderNetwork) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ProviderNetworkCondition) DeepCopyInto(out *ProviderNetworkCondition) { *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.Condition.DeepCopyInto(&out.Condition) return } @@ -1711,6 +1906,11 @@ func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) { *out = make([]Acl, len(*in)) copy(*out, *in) } + if in.NatOutgoingPolicyRules != nil { + in, out := &in.NatOutgoingPolicyRules, &out.NatOutgoingPolicyRules + *out = make([]NatOutgoingPolicyRule, len(*in)) + copy(*out, *in) + } if in.EnableLb != nil { in, out := &in.EnableLb, &out.EnableLb *out = new(bool) @@ -1739,6 +1939,11 @@ func (in *SubnetStatus) DeepCopyInto(out *SubnetStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NatOutgoingPolicyRules != nil { + in, out := &in.NatOutgoingPolicyRules, &out.NatOutgoingPolicyRules + *out = make([]NatOutgoingPolicyRuleStatus, len(*in)) + copy(*out, *in) + } return } @@ -1839,6 +2044,11 @@ func (in *SwitchLBRuleSpec) DeepCopyInto(out *SwitchLBRuleSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Endpoints != nil { + in, out := &in.Endpoints, &out.Endpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]SlrPort, len(*in)) diff --git a/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_ippool.go b/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_ippool.go new file mode 100644 index 00000000000..1b538ab4b6c --- /dev/null +++ b/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_ippool.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeIPPools implements IPPoolInterface +type FakeIPPools struct { + Fake *FakeKubeovnV1 +} + +var ippoolsResource = schema.GroupVersionResource{Group: "kubeovn.io", Version: "v1", Resource: "ippools"} + +var ippoolsKind = schema.GroupVersionKind{Group: "kubeovn.io", Version: "v1", Kind: "IPPool"} + +// Get takes name of the iPPool, and returns the corresponding iPPool object, and an error if there is any. +func (c *FakeIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *kubeovnv1.IPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(ippoolsResource, name), &kubeovnv1.IPPool{}) + if obj == nil { + return nil, err + } + return obj.(*kubeovnv1.IPPool), err +} + +// List takes label and field selectors, and returns the list of IPPools that match those selectors. +func (c *FakeIPPools) List(ctx context.Context, opts v1.ListOptions) (result *kubeovnv1.IPPoolList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(ippoolsResource, ippoolsKind, opts), &kubeovnv1.IPPoolList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &kubeovnv1.IPPoolList{ListMeta: obj.(*kubeovnv1.IPPoolList).ListMeta} + for _, item := range obj.(*kubeovnv1.IPPoolList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested iPPools. +func (c *FakeIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(ippoolsResource, opts)) +} + +// Create takes the representation of a iPPool and creates it. Returns the server's representation of the iPPool, and an error, if there is any. +func (c *FakeIPPools) Create(ctx context.Context, iPPool *kubeovnv1.IPPool, opts v1.CreateOptions) (result *kubeovnv1.IPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(ippoolsResource, iPPool), &kubeovnv1.IPPool{}) + if obj == nil { + return nil, err + } + return obj.(*kubeovnv1.IPPool), err +} + +// Update takes the representation of a iPPool and updates it. Returns the server's representation of the iPPool, and an error, if there is any. +func (c *FakeIPPools) Update(ctx context.Context, iPPool *kubeovnv1.IPPool, opts v1.UpdateOptions) (result *kubeovnv1.IPPool, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(ippoolsResource, iPPool), &kubeovnv1.IPPool{}) + if obj == nil { + return nil, err + } + return obj.(*kubeovnv1.IPPool), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeIPPools) UpdateStatus(ctx context.Context, iPPool *kubeovnv1.IPPool, opts v1.UpdateOptions) (*kubeovnv1.IPPool, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(ippoolsResource, "status", iPPool), &kubeovnv1.IPPool{}) + if obj == nil { + return nil, err + } + return obj.(*kubeovnv1.IPPool), err +} + +// Delete takes name of the iPPool and deletes it. Returns an error if one occurs. +func (c *FakeIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(ippoolsResource, name, opts), &kubeovnv1.IPPool{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(ippoolsResource, listOpts) + + _, err := c.Fake.Invokes(action, &kubeovnv1.IPPoolList{}) + return err +} + +// Patch applies the patch and returns the patched iPPool. +func (c *FakeIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *kubeovnv1.IPPool, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(ippoolsResource, name, pt, data, subresources...), &kubeovnv1.IPPool{}) + if obj == nil { + return nil, err + } + return obj.(*kubeovnv1.IPPool), err +} diff --git a/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_kubeovn_client.go b/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_kubeovn_client.go index 5485f3fe2b8..be653ef62ab 100644 --- a/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_kubeovn_client.go +++ b/pkg/client/clientset/versioned/typed/kubeovn/v1/fake/fake_kubeovn_client.go @@ -32,6 +32,10 @@ func (c *FakeKubeovnV1) IPs() v1.IPInterface { return &FakeIPs{c} } +func (c *FakeKubeovnV1) IPPools() v1.IPPoolInterface { + return &FakeIPPools{c} +} + func (c *FakeKubeovnV1) IptablesDnatRules() v1.IptablesDnatRuleInterface { return &FakeIptablesDnatRules{c} } diff --git a/pkg/client/clientset/versioned/typed/kubeovn/v1/generated_expansion.go b/pkg/client/clientset/versioned/typed/kubeovn/v1/generated_expansion.go index a6abce8cb5d..15876f7e55b 100644 --- a/pkg/client/clientset/versioned/typed/kubeovn/v1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/kubeovn/v1/generated_expansion.go @@ -20,6 +20,8 @@ package v1 type IPExpansion interface{} +type IPPoolExpansion interface{} + type IptablesDnatRuleExpansion interface{} type IptablesEIPExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/kubeovn/v1/ippool.go b/pkg/client/clientset/versioned/typed/kubeovn/v1/ippool.go new file mode 100644 index 00000000000..bd6c7bff6bf --- /dev/null +++ b/pkg/client/clientset/versioned/typed/kubeovn/v1/ippool.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + scheme "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// IPPoolsGetter has a method to return a IPPoolInterface. +// A group's client should implement this interface. +type IPPoolsGetter interface { + IPPools() IPPoolInterface +} + +// IPPoolInterface has methods to work with IPPool resources. 
+type IPPoolInterface interface { + Create(ctx context.Context, iPPool *v1.IPPool, opts metav1.CreateOptions) (*v1.IPPool, error) + Update(ctx context.Context, iPPool *v1.IPPool, opts metav1.UpdateOptions) (*v1.IPPool, error) + UpdateStatus(ctx context.Context, iPPool *v1.IPPool, opts metav1.UpdateOptions) (*v1.IPPool, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.IPPool, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.IPPoolList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IPPool, err error) + IPPoolExpansion +} + +// iPPools implements IPPoolInterface +type iPPools struct { + client rest.Interface +} + +// newIPPools returns a IPPools +func newIPPools(c *KubeovnV1Client) *iPPools { + return &iPPools{ + client: c.RESTClient(), + } +} + +// Get takes name of the iPPool, and returns the corresponding iPPool object, and an error if there is any. +func (c *iPPools) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IPPool, err error) { + result = &v1.IPPool{} + err = c.client.Get(). + Resource("ippools"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IPPools that match those selectors. +func (c *iPPools) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IPPoolList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.IPPoolList{} + err = c.client.Get(). + Resource("ippools"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested iPPools. +func (c *iPPools) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("ippools"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a iPPool and creates it. Returns the server's representation of the iPPool, and an error, if there is any. +func (c *iPPools) Create(ctx context.Context, iPPool *v1.IPPool, opts metav1.CreateOptions) (result *v1.IPPool, err error) { + result = &v1.IPPool{} + err = c.client.Post(). + Resource("ippools"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPPool). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a iPPool and updates it. Returns the server's representation of the iPPool, and an error, if there is any. +func (c *iPPools) Update(ctx context.Context, iPPool *v1.IPPool, opts metav1.UpdateOptions) (result *v1.IPPool, err error) { + result = &v1.IPPool{} + err = c.client.Put(). + Resource("ippools"). + Name(iPPool.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPPool). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *iPPools) UpdateStatus(ctx context.Context, iPPool *v1.IPPool, opts metav1.UpdateOptions) (result *v1.IPPool, err error) { + result = &v1.IPPool{} + err = c.client.Put(). + Resource("ippools"). + Name(iPPool.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(iPPool). + Do(ctx). 
+ Into(result) + return +} + +// Delete takes name of the iPPool and deletes it. Returns an error if one occurs. +func (c *iPPools) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("ippools"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *iPPools) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("ippools"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched iPPool. +func (c *iPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IPPool, err error) { + result = &v1.IPPool{} + err = c.client.Patch(pt). + Resource("ippools"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/kubeovn/v1/kubeovn_client.go b/pkg/client/clientset/versioned/typed/kubeovn/v1/kubeovn_client.go index b57353e036c..bd9b017ea8c 100644 --- a/pkg/client/clientset/versioned/typed/kubeovn/v1/kubeovn_client.go +++ b/pkg/client/clientset/versioned/typed/kubeovn/v1/kubeovn_client.go @@ -29,6 +29,7 @@ import ( type KubeovnV1Interface interface { RESTClient() rest.Interface IPsGetter + IPPoolsGetter IptablesDnatRulesGetter IptablesEIPsGetter IptablesFIPRulesGetter @@ -58,6 +59,10 @@ func (c *KubeovnV1Client) IPs() IPInterface { return newIPs(c) } +func (c *KubeovnV1Client) IPPools() IPPoolInterface { + return newIPPools(c) +} + func (c *KubeovnV1Client) IptablesDnatRules() IptablesDnatRuleInterface { return newIptablesDnatRules(c) } diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 8e097c69213..61c4b5fc9fa 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -55,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=kubeovn.io, Version=v1 case v1.SchemeGroupVersion.WithResource("ips"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kubeovn().V1().IPs().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("ippools"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Kubeovn().V1().IPPools().Informer()}, nil case v1.SchemeGroupVersion.WithResource("iptables-dnat-rules"): return &genericInformer{resource: resource.GroupResource(), informer: f.Kubeovn().V1().IptablesDnatRules().Informer()}, nil case v1.SchemeGroupVersion.WithResource("iptables-eips"): diff --git a/pkg/client/informers/externalversions/kubeovn/v1/interface.go b/pkg/client/informers/externalversions/kubeovn/v1/interface.go index d21d4c441cd..2b13d05651d 100644 --- 
a/pkg/client/informers/externalversions/kubeovn/v1/interface.go +++ b/pkg/client/informers/externalversions/kubeovn/v1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // IPs returns a IPInformer. IPs() IPInformer + // IPPools returns a IPPoolInformer. + IPPools() IPPoolInformer // IptablesDnatRules returns a IptablesDnatRuleInformer. IptablesDnatRules() IptablesDnatRuleInformer // IptablesEIPs returns a IptablesEIPInformer. @@ -80,6 +82,11 @@ func (v *version) IPs() IPInformer { return &iPInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// IPPools returns a IPPoolInformer. +func (v *version) IPPools() IPPoolInformer { + return &iPPoolInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // IptablesDnatRules returns a IptablesDnatRuleInformer. func (v *version) IptablesDnatRules() IptablesDnatRuleInformer { return &iptablesDnatRuleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/pkg/client/informers/externalversions/kubeovn/v1/ippool.go b/pkg/client/informers/externalversions/kubeovn/v1/ippool.go new file mode 100644 index 00000000000..ea6559ef6f6 --- /dev/null +++ b/pkg/client/informers/externalversions/kubeovn/v1/ippool.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + time "time" + + kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + versioned "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned" + internalinterfaces "github.com/kubeovn/kube-ovn/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/kubeovn/kube-ovn/pkg/client/listers/kubeovn/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// IPPoolInformer provides access to a shared informer and lister for +// IPPools. +type IPPoolInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.IPPoolLister +} + +type iPPoolInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIPPoolInformer constructs a new informer for IPPool type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIPPoolInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIPPoolInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIPPoolInformer constructs a new informer for IPPool type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIPPoolInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KubeovnV1().IPPools().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.KubeovnV1().IPPools().Watch(context.TODO(), options) + }, + }, + &kubeovnv1.IPPool{}, + resyncPeriod, + indexers, + ) +} + +func (f *iPPoolInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIPPoolInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *iPPoolInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&kubeovnv1.IPPool{}, f.defaultInformer) +} + +func (f *iPPoolInformer) Lister() v1.IPPoolLister { + return v1.NewIPPoolLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/kubeovn/v1/expansion_generated.go b/pkg/client/listers/kubeovn/v1/expansion_generated.go index 21b626d8b5c..998c1ddaa14 100644 --- a/pkg/client/listers/kubeovn/v1/expansion_generated.go +++ b/pkg/client/listers/kubeovn/v1/expansion_generated.go @@ -22,6 +22,10 @@ package v1 // IPLister. type IPListerExpansion interface{} +// IPPoolListerExpansion allows custom methods to be added to +// IPPoolLister. +type IPPoolListerExpansion interface{} + // IptablesDnatRuleListerExpansion allows custom methods to be added to // IptablesDnatRuleLister. 
type IptablesDnatRuleListerExpansion interface{} diff --git a/pkg/client/listers/kubeovn/v1/ippool.go b/pkg/client/listers/kubeovn/v1/ippool.go new file mode 100644 index 00000000000..95058c4a38f --- /dev/null +++ b/pkg/client/listers/kubeovn/v1/ippool.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IPPoolLister helps list IPPools. +// All objects returned here must be treated as read-only. +type IPPoolLister interface { + // List lists all IPPools in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.IPPool, err error) + // Get retrieves the IPPool from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.IPPool, error) + IPPoolListerExpansion +} + +// iPPoolLister implements the IPPoolLister interface. +type iPPoolLister struct { + indexer cache.Indexer +} + +// NewIPPoolLister returns a new IPPoolLister. +func NewIPPoolLister(indexer cache.Indexer) IPPoolLister { + return &iPPoolLister{indexer: indexer} +} + +// List lists all IPPools in the indexer. 
+func (s *iPPoolLister) List(selector labels.Selector) (ret []*v1.IPPool, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.IPPool)) + }) + return ret, err +} + +// Get retrieves the IPPool from the index for a given name. +func (s *iPPoolLister) Get(name string) (*v1.IPPool, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("ippool"), name) + } + return obj.(*v1.IPPool), nil +} diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index 025903b8c1c..0969d71a855 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -105,6 +105,13 @@ type Controller struct { syncVirtualPortsQueue workqueue.RateLimitingInterface subnetKeyMutex keymutex.KeyMutex + ippoolLister kubeovnlister.IPPoolLister + ippoolSynced cache.InformerSynced + addOrUpdateIPPoolQueue workqueue.RateLimitingInterface + updateIPPoolStatusQueue workqueue.RateLimitingInterface + deleteIPPoolQueue workqueue.RateLimitingInterface + ippoolKeyMutex keymutex.KeyMutex + ipsLister kubeovnlister.IPLister ipSynced cache.InformerSynced @@ -267,6 +274,7 @@ func Run(ctx context.Context, config *Configuration) { vpcInformer := kubeovnInformerFactory.Kubeovn().V1().Vpcs() vpcNatGatewayInformer := kubeovnInformerFactory.Kubeovn().V1().VpcNatGateways() subnetInformer := kubeovnInformerFactory.Kubeovn().V1().Subnets() + ippoolInformer := kubeovnInformerFactory.Kubeovn().V1().IPPools() ipInformer := kubeovnInformerFactory.Kubeovn().V1().IPs() virtualIpInformer := kubeovnInformerFactory.Kubeovn().V1().Vips() iptablesEipInformer := kubeovnInformerFactory.Kubeovn().V1().IptablesEIPs() @@ -333,6 +341,13 @@ func Run(ctx context.Context, config *Configuration) { syncVirtualPortsQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "SyncVirtualPort"), subnetKeyMutex: keymutex.NewHashed(numKeyLocks), + 
ippoolLister: ippoolInformer.Lister(), + ippoolSynced: ippoolInformer.Informer().HasSynced, + addOrUpdateIPPoolQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "AddIPPool"), + updateIPPoolStatusQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "UpdateIPPoolStatus"), + deleteIPPoolQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DeleteIPPool"), + ippoolKeyMutex: keymutex.NewHashed(numKeyLocks), + ipsLister: ipInformer.Lister(), ipSynced: ipInformer.Informer().HasSynced, @@ -493,7 +508,7 @@ func Run(ctx context.Context, config *Configuration) { controller.npsSynced = npInformer.Informer().HasSynced controller.updateNpQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "UpdateNp") controller.deleteNpQueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DeleteNp") - controller.npKeyMutex = keymutex.NewHashed(128) + controller.npKeyMutex = keymutex.NewHashed(numKeyLocks) } defer controller.shutdown() @@ -588,6 +603,14 @@ func Run(ctx context.Context, config *Configuration) { util.LogFatalAndExit(err, "failed to add subnet event handler") } + if _, err = ippoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: controller.enqueueAddIPPool, + UpdateFunc: controller.enqueueUpdateIPPool, + DeleteFunc: controller.enqueueDeleteIPPool, + }); err != nil { + util.LogFatalAndExit(err, "failed to add ippool event handler") + } + if _, err = ipInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueAddOrDelIP, UpdateFunc: controller.enqueueUpdateIP, @@ -829,6 +852,9 @@ func (c *Controller) shutdown() { c.updateSubnetStatusQueue.ShutDown() c.syncVirtualPortsQueue.ShutDown() + c.addOrUpdateIPPoolQueue.ShutDown() + c.deleteIPPoolQueue.ShutDown() + c.addNodeQueue.ShutDown() c.updateNodeQueue.ShutDown() c.deleteNodeQueue.ShutDown() @@ -940,6 +966,7 
@@ func (c *Controller) startWorkers(ctx context.Context) { // add default/join subnet and wait them ready go wait.Until(c.runAddSubnetWorker, time.Second, ctx.Done()) + go wait.Until(c.runAddIPPoolWorker, time.Second, ctx.Done()) go wait.Until(c.runAddVlanWorker, time.Second, ctx.Done()) go wait.Until(c.runAddNamespaceWorker, time.Second, ctx.Done()) err := wait.PollUntilContextCancel(ctx, 3*time.Second, true, func(_ context.Context) (done bool, err error) { @@ -1006,7 +1033,9 @@ func (c *Controller) startWorkers(ctx context.Context) { go wait.Until(c.runUpdatePodSecurityWorker, time.Second, ctx.Done()) go wait.Until(c.runDeleteSubnetWorker, time.Second, ctx.Done()) + go wait.Until(c.runDeleteIPPoolWorker, time.Second, ctx.Done()) go wait.Until(c.runUpdateSubnetStatusWorker, time.Second, ctx.Done()) + go wait.Until(c.runUpdateIPPoolStatusWorker, time.Second, ctx.Done()) go wait.Until(c.runSyncVirtualPortsWorker, time.Second, ctx.Done()) if c.config.EnableLb { diff --git a/pkg/controller/init.go b/pkg/controller/init.go index 900d645a817..79dfbe6b4c9 100644 --- a/pkg/controller/init.go +++ b/pkg/controller/init.go @@ -293,6 +293,17 @@ func (c *Controller) InitIPAM() error { } } + ippools, err := c.ippoolLister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list ippool: %v", err) + return err + } + for _, ippool := range ippools { + if err = c.ipam.AddOrUpdateIPPool(ippool.Spec.Subnet, ippool.Name, ippool.Spec.IPs); err != nil { + klog.Errorf("failed to init ippool %s: %v", ippool.Name, err) + } + } + lsList, err := c.ovnClient.ListLogicalSwitch(false, nil) if err != nil { klog.Errorf("failed to list LS: %v", err) @@ -375,13 +386,12 @@ func (c *Controller) InitIPAM() error { portName := ovs.PodNameToPortName(podName, pod.Namespace, podNet.ProviderName) ip := pod.Annotations[fmt.Sprintf(util.IpAddressAnnotationTemplate, podNet.ProviderName)] mac := pod.Annotations[fmt.Sprintf(util.MacAddressAnnotationTemplate, podNet.ProviderName)] - subnet := 
pod.Annotations[fmt.Sprintf(util.LogicalSwitchAnnotationTemplate, podNet.ProviderName)] - _, _, _, err := c.ipam.GetStaticAddress(key, portName, ip, &mac, subnet, true) + _, _, _, err := c.ipam.GetStaticAddress(key, portName, ip, &mac, podNet.Subnet.Name, true) if err != nil { klog.Errorf("failed to init pod %s.%s address %s: %v", podName, pod.Namespace, pod.Annotations[fmt.Sprintf(util.IpAddressAnnotationTemplate, podNet.ProviderName)], err) } else { ipCR := ipsMap[portName] - err = c.createOrUpdateCrdIPs(podName, ip, mac, subnet, pod.Namespace, pod.Spec.NodeName, podNet.ProviderName, podType, &ipCR) + err = c.createOrUpdateCrdIPs(podName, ip, mac, podNet.Subnet.Name, pod.Namespace, pod.Spec.NodeName, podNet.ProviderName, podType, &ipCR) if err != nil { klog.Errorf("failed to create/update ips CR %s.%s with ip address %s: %v", podName, pod.Namespace, ip, err) } diff --git a/pkg/controller/ippool.go b/pkg/controller/ippool.go new file mode 100644 index 00000000000..340cc521d59 --- /dev/null +++ b/pkg/controller/ippool.go @@ -0,0 +1,291 @@ +package controller + +import ( + "context" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/util" +) + +func (c *Controller) enqueueAddIPPool(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + klog.V(3).Infof("enqueue add ippool %s", key) + c.addOrUpdateIPPoolQueue.Add(key) +} + +func (c *Controller) enqueueDeleteIPPool(obj interface{}) { + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + klog.V(3).Infof("enqueue delete ippool 
%s", key) + c.deleteIPPoolQueue.Add(obj) +} + +func (c *Controller) enqueueUpdateIPPool(old, new interface{}) { + oldIPPool := old.(*kubeovnv1.IPPool) + newIPPool := new.(*kubeovnv1.IPPool) + key, err := cache.MetaNamespaceKeyFunc(new) + if err != nil { + utilruntime.HandleError(err) + return + } + + if !reflect.DeepEqual(oldIPPool.Spec.Namespaces, newIPPool.Spec.Namespaces) || + !reflect.DeepEqual(oldIPPool.Spec.IPs, newIPPool.Spec.IPs) || + oldIPPool.Spec.Protocol != newIPPool.Spec.Protocol { + klog.V(3).Infof("enqueue update ippool %s", key) + c.addOrUpdateIPPoolQueue.Add(key) + } +} + +func (c *Controller) runAddIPPoolWorker() { + for c.processNextAddIPPoolWorkItem() { + } +} + +func (c *Controller) runUpdateIPPoolStatusWorker() { + for c.processNextUpdateIPPoolStatusWorkItem() { + } +} + +func (c *Controller) runDeleteIPPoolWorker() { + for c.processNextDeleteIPPoolWorkItem() { + } +} + +func (c *Controller) processNextAddIPPoolWorkItem() bool { + obj, shutdown := c.addOrUpdateIPPoolQueue.Get() + if shutdown { + return false + } + + err := func(obj interface{}) error { + defer c.addOrUpdateIPPoolQueue.Done(obj) + key, ok := obj.(string) + if !ok { + c.addOrUpdateIPPoolQueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + if err := c.handleAddOrUpdateIPPool(key); err != nil { + c.addOrUpdateIPPoolQueue.AddRateLimited(key) + return fmt.Errorf("error syncing ippool %q: %s, requeuing", key, err.Error()) + } + c.addOrUpdateIPPoolQueue.Forget(obj) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + return true +} + +func (c *Controller) processNextUpdateIPPoolStatusWorkItem() bool { + obj, shutdown := c.updateIPPoolStatusQueue.Get() + if shutdown { + return false + } + + err := func(obj interface{}) error { + defer c.updateIPPoolStatusQueue.Done(obj) + var key string + var ok bool + if key, ok = obj.(string); !ok { + c.updateIPPoolStatusQueue.Forget(obj) + 
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) + return nil + } + if err := c.handleUpdateIPPoolStatus(key); err != nil { + c.updateIPPoolStatusQueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + return true +} + +func (c *Controller) processNextDeleteIPPoolWorkItem() bool { + obj, shutdown := c.deleteIPPoolQueue.Get() + if shutdown { + return false + } + + err := func(obj interface{}) error { + defer c.deleteIPPoolQueue.Done(obj) + ippool, ok := obj.(*kubeovnv1.IPPool) + if !ok { + c.deleteIPPoolQueue.Forget(obj) + utilruntime.HandleError(fmt.Errorf("expected ippool in workqueue but got %#v", obj)) + return nil + } + if err := c.handleDeleteIPPool(ippool); err != nil { + c.deleteIPPoolQueue.AddRateLimited(obj) + return fmt.Errorf("error syncing ippool %q: %s, requeuing", ippool.Name, err.Error()) + } + c.deleteIPPoolQueue.Forget(obj) + return nil + }(obj) + + if err != nil { + utilruntime.HandleError(err) + return true + } + return true +} + +func (c *Controller) handleAddOrUpdateIPPool(key string) error { + c.ippoolKeyMutex.LockKey(key) + defer func() { _ = c.ippoolKeyMutex.UnlockKey(key) }() + + cachedIPPool, err := c.ippoolLister.Get(key) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return err + } + klog.Infof("handle add/update ippool %s", cachedIPPool.Name) + + ippool := cachedIPPool.DeepCopy() + ippool.Status.EnsureStandardConditions() + if err = c.ipam.AddOrUpdateIPPool(ippool.Spec.Subnet, ippool.Name, ippool.Spec.IPs); err != nil { + klog.Errorf("failed to add/update ippool %s with IPs %v in subnet %s: %v", ippool.Name, ippool.Spec.IPs, ippool.Spec.Subnet, err) + if patchErr := c.patchIPPoolStatus(ippool, "UpdateIPAMFailed", err.Error()); patchErr != nil { + klog.Error(patchErr) + } + return err + } + + v4a, v4u, v6a, v6u, v4as, v4us, v6as, v6us := 
c.ipam.IPPoolStatistics(ippool.Spec.Subnet, ippool.Name) + ippool.Status.V4AvailableIPs = v4a + ippool.Status.V4UsingIPs = v4u + ippool.Status.V6AvailableIPs = v6a + ippool.Status.V6UsingIPs = v6u + ippool.Status.V4AvailableIPRange = v4as + ippool.Status.V4UsingIPRange = v4us + ippool.Status.V6AvailableIPRange = v6as + ippool.Status.V6UsingIPRange = v6us + + if err = c.patchIPPoolStatus(ippool, "UpdateIPAMSucceeded", ""); err != nil { + klog.Error(err) + return err + } + + for _, ns := range ippool.Spec.Namespaces { + c.addNamespaceQueue.Add(ns) + } + + return nil +} + +func (c *Controller) handleDeleteIPPool(ippool *kubeovnv1.IPPool) error { + c.ippoolKeyMutex.LockKey(ippool.Name) + defer func() { _ = c.ippoolKeyMutex.UnlockKey(ippool.Name) }() + + klog.Infof("handle delete ippool %s", ippool.Name) + c.ipam.RemoveIPPool(ippool.Spec.Subnet, ippool.Name) + + namespaces, err := c.namespacesLister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list namespaces: %v", err) + return err + } + + for _, ns := range namespaces { + if len(ns.Annotations) == 0 { + continue + } + if ns.Annotations[util.IpPoolAnnotation] == ippool.Name { + c.enqueueAddNamespace(ns) + } + } + + return nil +} + +func (c *Controller) handleUpdateIPPoolStatus(key string) error { + c.ippoolKeyMutex.LockKey(key) + defer func() { _ = c.ippoolKeyMutex.UnlockKey(key) }() + + cachedIPPool, err := c.ippoolLister.Get(key) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil + } + return err + } + + ippool := cachedIPPool.DeepCopy() + v4a, v4u, v6a, v6u, v4as, v4us, v6as, v6us := c.ipam.IPPoolStatistics(ippool.Spec.Subnet, ippool.Name) + ippool.Status.V4AvailableIPs = v4a + ippool.Status.V4UsingIPs = v4u + ippool.Status.V6AvailableIPs = v6a + ippool.Status.V6UsingIPs = v6u + ippool.Status.V4AvailableIPRange = v4as + ippool.Status.V4UsingIPRange = v4us + ippool.Status.V6AvailableIPRange = v6as + ippool.Status.V6UsingIPRange = v6us + if reflect.DeepEqual(ippool.Status, 
cachedIPPool.Status) { + return nil + } + + bytes, err := ippool.Status.Bytes() + if err != nil { + klog.Errorf("failed to generate json representation for status of ippool %s: %v", ippool.Name, err) + return err + } + _, err = c.config.KubeOvnClient.KubeovnV1().IPPools().Patch(context.Background(), ippool.Name, types.MergePatchType, bytes, metav1.PatchOptions{}, "status") + if err != nil { + klog.Errorf("failed to patch status of ippool %s: %v", ippool.Name, err) + return err + } + + return nil +} + +func (c Controller) patchIPPoolStatus(ippool *kubeovnv1.IPPool, reason, errMsg string) error { + if errMsg != "" { + ippool.Status.SetError(reason, errMsg) + ippool.Status.NotReady(reason, errMsg) + c.recorder.Eventf(ippool, corev1.EventTypeWarning, reason, errMsg) + } else { + ippool.Status.Ready(reason, "") + c.recorder.Eventf(ippool, corev1.EventTypeNormal, reason, errMsg) + } + + bytes, err := ippool.Status.Bytes() + if err != nil { + klog.Error(err) + return err + } + if _, err = c.config.KubeOvnClient.KubeovnV1().IPPools().Patch(context.Background(), ippool.Name, types.MergePatchType, bytes, metav1.PatchOptions{}, "status"); err != nil { + klog.Error("failed to patch status of ippool %s: %v", ippool.Name, err) + return err + } + + return nil +} diff --git a/pkg/controller/namespace.go b/pkg/controller/namespace.go index 829933fcc48..ae069d5242e 100644 --- a/pkg/controller/namespace.go +++ b/pkg/controller/namespace.go @@ -120,13 +120,19 @@ func (c *Controller) handleAddNamespace(key string) error { } namespace := cachedNs.DeepCopy() - var ls string + var ls, ippool string var lss, cidrs, excludeIps []string subnets, err := c.subnetsLister.List(labels.Everything()) if err != nil { klog.Errorf("failed to list subnets %v", err) return err } + ippools, err := c.ippoolLister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list ippools: %v", err) + return err + } + // check if subnet bind ns for _, s := range subnets { for _, ns := range 
s.Spec.Namespaces { @@ -139,6 +145,13 @@ func (c *Controller) handleAddNamespace(key string) error { } } + for _, p := range ippools { + if util.ContainsString(p.Spec.Namespaces, key) { + ippool = p.Name + break + } + } + if lss == nil { // If NS does not belong to any custom VPC, then this NS belongs to the default VPC vpc, err := c.vpcsLister.Get(c.config.ClusterRouter) @@ -178,7 +191,8 @@ func (c *Controller) handleAddNamespace(key string) error { } else { if namespace.Annotations[util.LogicalSwitchAnnotation] == strings.Join(lss, ",") && namespace.Annotations[util.CidrAnnotation] == strings.Join(cidrs, ";") && - namespace.Annotations[util.ExcludeIpsAnnotation] == strings.Join(excludeIps, ";") { + namespace.Annotations[util.ExcludeIpsAnnotation] == strings.Join(excludeIps, ";") && + namespace.Annotations[util.IpPoolAnnotation] == ippool { return nil } } @@ -186,6 +200,12 @@ func (c *Controller) handleAddNamespace(key string) error { namespace.Annotations[util.CidrAnnotation] = strings.Join(cidrs, ";") namespace.Annotations[util.ExcludeIpsAnnotation] = strings.Join(excludeIps, ";") + if ippool == "" { + delete(namespace.Annotations, util.IpPoolAnnotation) + } else { + namespace.Annotations[util.IpPoolAnnotation] = ippool + } + patch, err := util.GenerateStrategicMergePatchPayload(cachedNs, namespace) if err != nil { return err diff --git a/pkg/controller/node.go b/pkg/controller/node.go index 45bd2c0a04d..e126d789849 100644 --- a/pkg/controller/node.go +++ b/pkg/controller/node.go @@ -250,7 +250,7 @@ func (c *Controller) handleAddNode(key string) error { return err } } else { - v4IP, v6IP, mac, err = c.ipam.GetRandomAddress(portName, portName, nil, c.config.NodeSwitch, nil, true) + v4IP, v6IP, mac, err = c.ipam.GetRandomAddress(portName, portName, nil, c.config.NodeSwitch, "", nil, true) if err != nil { klog.Errorf("failed to alloc random ip addrs for node %v: %v", node.Name, err) return err diff --git a/pkg/controller/pod.go b/pkg/controller/pod.go index 
d9308934482..99476de9db3 100644 --- a/pkg/controller/pod.go +++ b/pkg/controller/pod.go @@ -1448,14 +1448,26 @@ func (c *Controller) acquireAddress(pod *v1.Pod, podNet *kubeovnNet) (string, st *macStr = "" } + ippoolStr := pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)] + if ippoolStr == "" { + ns, err := c.namespacesLister.Get(pod.Namespace) + if err != nil { + klog.Errorf("failed to get namespace %s: %v", pod.Namespace, err) + return "", "", "", podNet.Subnet, err + } + if len(ns.Annotations) != 0 { + ippoolStr = ns.Annotations[util.IpPoolAnnotation] + } + } + // Random allocate if pod.Annotations[fmt.Sprintf(util.IpAddressAnnotationTemplate, podNet.ProviderName)] == "" && - pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)] == "" { + ippoolStr == "" { var skippedAddrs []string for { portName := ovs.PodNameToPortName(podName, pod.Namespace, podNet.ProviderName) - ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(key, portName, macStr, podNet.Subnet.Name, skippedAddrs, !podNet.AllowLiveMigration) + ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(key, portName, macStr, podNet.Subnet.Name, "", skippedAddrs, !podNet.AllowLiveMigration) if err != nil { return "", "", "", podNet.Subnet, err } @@ -1497,17 +1509,42 @@ func (c *Controller) acquireAddress(pod *v1.Pod, podNet *kubeovnNet) (string, st } // IPPool allocate - if pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)] != "" { + if ippoolStr != "" { var ipPool []string - if strings.Contains(pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)], ";") { - ipPool = strings.Split(pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)], ";") + if strings.ContainsRune(ippoolStr, ';') { + ipPool = strings.Split(ippoolStr, ";") } else { - ipPool = strings.Split(pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)], ",") + ipPool = strings.Split(ippoolStr, 
",") } for i, ip := range ipPool { ipPool[i] = strings.TrimSpace(ip) } + if len(ipPool) == 1 && net.ParseIP(ipPool[0]) == nil { + var skippedAddrs []string + for { + portName := ovs.PodNameToPortName(podName, pod.Namespace, podNet.ProviderName) + ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(key, portName, macStr, podNet.Subnet.Name, ipPool[0], skippedAddrs, !podNet.AllowLiveMigration) + if err != nil { + return "", "", "", podNet.Subnet, err + } + ipv4OK, ipv6OK, err := c.validatePodIP(pod.Name, podNet.Subnet.Name, ipv4, ipv6) + if err != nil { + return "", "", "", podNet.Subnet, err + } + if ipv4OK && ipv6OK { + return ipv4, ipv6, mac, podNet.Subnet, nil + } + + if !ipv4OK { + skippedAddrs = append(skippedAddrs, ipv4) + } + if !ipv6OK { + skippedAddrs = append(skippedAddrs, ipv6) + } + } + } + if !isStsPod { for _, net := range nsNets { for _, staticIP := range ipPool { @@ -1530,7 +1567,7 @@ func (c *Controller) acquireAddress(pod *v1.Pod, podNet *kubeovnNet) (string, st } } } - klog.Errorf("acquire address %s for %s failed, %v", pod.Annotations[fmt.Sprintf(util.IpPoolAnnotationTemplate, podNet.ProviderName)], key, err) + klog.Errorf("acquire address from ippool %s for %s failed, %v", ippoolStr, key, err) } else { tempStrs := strings.Split(pod.Name, "-") numStr := tempStrs[len(tempStrs)-1] diff --git a/pkg/controller/subnet.go b/pkg/controller/subnet.go index f376a6e26f4..80af215d5d7 100644 --- a/pkg/controller/subnet.go +++ b/pkg/controller/subnet.go @@ -838,6 +838,18 @@ func (c *Controller) handleUpdateSubnetStatus(key string) error { } return err } + + ippools, err := c.ippoolLister.List(labels.Everything()) + if err != nil { + klog.Errorf("failed to list ippool: %v", err) + return err + } + for _, p := range ippools { + if p.Spec.Subnet == subnet.Name { + c.updateIPPoolStatusQueue.Add(p.Name) + } + } + if util.CheckProtocol(subnet.Spec.CIDRBlock) == kubeovnv1.ProtocolDual { return calcDualSubnetStatusIP(subnet, c) } else { @@ -2071,11 +2083,12 @@ func 
checkAndFormatsExcludeIps(subnet *kubeovnv1.Subnet) bool { for _, excludeIP := range subnet.Spec.ExcludeIps { if _, ok := mapIps[excludeIP]; !ok { ips := strings.Split(excludeIP, "..") - if len(ips) == 1 { - mapIps[excludeIP] = ipam.NewIPRange(ipam.NewIP(ips[0]), ipam.NewIP(ips[0])) - } else { - mapIps[excludeIP] = ipam.NewIPRange(ipam.NewIP(ips[0]), ipam.NewIP(ips[1])) + start, _ := ipam.NewIP(ips[0]) + end := start + if len(ips) != 1 { + end, _ = ipam.NewIP(ips[1]) } + mapIps[excludeIP] = ipam.NewIPRange(start, end) } } newMap := filterRepeatIPRange(mapIps) diff --git a/pkg/controller/vip.go b/pkg/controller/vip.go index 4ed5d1f0f8a..1e6f629343d 100644 --- a/pkg/controller/vip.go +++ b/pkg/controller/vip.go @@ -340,7 +340,7 @@ func (c *Controller) acquireIpAddress(subnetName, name, nicName string) (string, checkConflict := true var err error for { - v4ip, v6ip, mac, err = c.ipam.GetRandomAddress(name, nicName, nil, subnetName, skippedAddrs, checkConflict) + v4ip, v6ip, mac, err = c.ipam.GetRandomAddress(name, nicName, nil, subnetName, "", skippedAddrs, checkConflict) if err != nil { return "", "", "", err } diff --git a/pkg/controller/vpc_nat_gw_eip.go b/pkg/controller/vpc_nat_gw_eip.go index eacc50f88ce..c013c6e4d95 100644 --- a/pkg/controller/vpc_nat_gw_eip.go +++ b/pkg/controller/vpc_nat_gw_eip.go @@ -579,7 +579,7 @@ func (c *Controller) acquireStaticEip(name, namespace, nicName, ip, externalSubn func (c *Controller) acquireEip(name, namespace, nicName, externalSubnet string) (string, string, string, error) { var skippedAddrs []string for { - ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(name, nicName, nil, externalSubnet, skippedAddrs, true) + ipv4, ipv6, mac, err := c.ipam.GetRandomAddress(name, nicName, nil, externalSubnet, "", skippedAddrs, true) if err != nil { return "", "", "", err } diff --git a/pkg/ipam/ip.go b/pkg/ipam/ip.go index 41e6d04eb9a..cdb3f334fd6 100644 --- a/pkg/ipam/ip.go +++ b/pkg/ipam/ip.go @@ -1,20 +1,23 @@ package ipam import ( + 
"fmt" "math/big" "net" ) type IP net.IP -func NewIP(s string) IP { +func NewIP(s string) (IP, error) { ip := net.ParseIP(s) - if ip != nil { - if ip4 := ip.To4(); ip4 != nil { - ip = ip4 - } + if ip == nil { + return nil, fmt.Errorf("invalid IP address %q", s) } - return IP(ip) + + if ip4 := ip.To4(); ip4 != nil { + ip = ip4 + } + return IP(ip), nil } func (a IP) To4() net.IP { diff --git a/pkg/ipam/ip_range.go b/pkg/ipam/ip_range.go index 32e0b8dc7a1..3ab829eb6c1 100644 --- a/pkg/ipam/ip_range.go +++ b/pkg/ipam/ip_range.go @@ -2,6 +2,8 @@ package ipam import ( "fmt" + "math/big" + "strconv" ) // IPRange represents an IP range of [start, end] @@ -33,6 +35,12 @@ func (r *IPRange) SetEnd(ip IP) { r.end = ip } +func (r *IPRange) Count() float64 { + n := big.NewInt(0).Sub(big.NewInt(0).SetBytes([]byte(r.end)), big.NewInt(0).SetBytes([]byte(r.start))) + count, _ := strconv.ParseFloat(n.Add(n, big.NewInt(1)).String(), 64) + return count +} + func (r *IPRange) Contains(ip IP) bool { return !r.start.GreaterThan(ip) && !r.end.LessThan(ip) } diff --git a/pkg/ipam/ip_range_list.go b/pkg/ipam/ip_range_list.go index aa161ebc372..a5d2f4d9d42 100644 --- a/pkg/ipam/ip_range_list.go +++ b/pkg/ipam/ip_range_list.go @@ -1,6 +1,7 @@ package ipam import ( + "fmt" "sort" "strings" ) @@ -13,15 +14,31 @@ func NewIPRangeList() *IPRangeList { return &IPRangeList{} } -func NewIPRangeListFrom(x ...string) *IPRangeList { +func NewIPRangeListFrom(x ...string) (*IPRangeList, error) { ret := &IPRangeList{make([]*IPRange, 0, len(x))} for _, s := range x { ips := strings.Split(s, "..") if len(ips) == 1 { - ret.Add(NewIP(ips[0])) + ip, err := NewIP(ips[0]) + if err != nil { + return nil, err + } + ret.Add(ip) } else { - n1, found1 := ret.Find(NewIP(ips[0])) - n2, found2 := ret.Find(NewIP(ips[1])) + start, err := NewIP(ips[0]) + if err != nil { + return nil, err + } + end, err := NewIP(ips[1]) + if err != nil { + return nil, err + } + if end.LessThan(start) { + return nil, fmt.Errorf("invalid IP 
range %s: end %s must NOT be less than start %s", s, ips[1], ips[0]) + } + + n1, found1 := ret.Find(start) + n2, found2 := ret.Find(end) if found1 { if found2 { if n1 != n2 { @@ -29,29 +46,29 @@ func NewIPRangeListFrom(x ...string) *IPRangeList { ret.ranges = append(ret.ranges[:n1+1], ret.ranges[n2+1:]...) } } else { - ret.ranges[n1].SetEnd(NewIP(ips[1])) + ret.ranges[n1].SetEnd(end) ret.ranges = append(ret.ranges[:n1+1], ret.ranges[n2:]...) } } else { if found2 { - ret.ranges[n2].SetStart(NewIP(ips[0])) + ret.ranges[n2].SetStart(start) ret.ranges = append(ret.ranges[:n1], ret.ranges[n2:]...) } else { if n1 == n2 { tmp := make([]*IPRange, ret.Len()+1) copy(tmp, ret.ranges[:n1]) - tmp[n1] = NewIPRange(NewIP(ips[0]), NewIP(ips[1])) + tmp[n1] = NewIPRange(start, end) copy(tmp[n1+1:], ret.ranges[n1:]) ret.ranges = tmp } else { - ret.ranges[n1] = NewIPRange(NewIP(ips[0]), NewIP(ips[1])) + ret.ranges[n1] = NewIPRange(start, end) ret.ranges = append(ret.ranges[:n1+1], ret.ranges[n2+1:]...) } } } } } - return ret + return ret, nil } func (r *IPRangeList) Clone() *IPRangeList { @@ -64,6 +81,14 @@ func (r *IPRangeList) Len() int { return len(r.ranges) } +func (r *IPRangeList) Count() float64 { + var sum float64 + for _, v := range r.ranges { + sum += v.Count() + } + return sum +} + func (r *IPRangeList) At(i int) *IPRange { if i < len(r.ranges) { return r.ranges[i] @@ -151,7 +176,7 @@ func (r *IPRangeList) Allocate(skipped []IP) IP { tmp.Add(ip) } - filtered := r.Difference(tmp) + filtered := r.Separate(tmp) if filtered.Len() == 0 { return nil } @@ -175,8 +200,8 @@ func (r *IPRangeList) Equal(x *IPRangeList) bool { return true } -// Difference returns a new list which contains items which are in `r` but not in `x` -func (r *IPRangeList) Difference(x *IPRangeList) *IPRangeList { +// Separate returns a new list which contains items which are in `r` but not in `x` +func (r *IPRangeList) Separate(x *IPRangeList) *IPRangeList { if r.Len() == 0 { return NewIPRangeList() } @@ 
-216,22 +241,23 @@ func (r *IPRangeList) Difference(x *IPRangeList) *IPRangeList { } func (r *IPRangeList) Merge(x *IPRangeList) *IPRangeList { - ret := &IPRangeList{make([]*IPRange, 0, r.Len()+x.Len())} + s := r.Separate(x) + ret := &IPRangeList{make([]*IPRange, 0, s.Len()+x.Len())} var i, j int - for i != r.Len() || j != x.Len() { - if i == r.Len() { + for i != s.Len() || j != x.Len() { + if i == s.Len() { ret.ranges = append(ret.ranges, x.ranges[j].Clone()) j++ continue } if j == x.Len() { - ret.ranges = append(ret.ranges, r.ranges[i].Clone()) + ret.ranges = append(ret.ranges, s.ranges[i].Clone()) i++ continue } - if r.ranges[i].Start().LessThan(x.ranges[j].Start()) { - ret.ranges = append(ret.ranges, r.ranges[i].Clone()) + if s.ranges[i].Start().LessThan(x.ranges[j].Start()) { + ret.ranges = append(ret.ranges, s.ranges[i].Clone()) i++ } else { ret.ranges = append(ret.ranges, x.ranges[j].Clone()) @@ -239,7 +265,20 @@ func (r *IPRangeList) Merge(x *IPRangeList) *IPRangeList { } } - return ret + for i := 0; i < ret.Len()-1; i++ { + if ret.ranges[i].End().Add(1).Equal(ret.ranges[i+1].Start()) { + ret.ranges[i].end = ret.ranges[i+1].end + ret.ranges = append(ret.ranges[:i+1], ret.ranges[i+2:]...) 
+ } + } + + return ret.Clone() +} + +// Intersect returns a new list which contains items which are in both `r` and `x` +func (r *IPRangeList) Intersect(x *IPRangeList) *IPRangeList { + r1, r2 := r.Separate(x), x.Separate(r) + return r.Merge(x).Separate(r1).Separate(r2) } func (r *IPRangeList) String() string { diff --git a/pkg/ipam/ipam.go b/pkg/ipam/ipam.go index 06081eb9ee2..28c203bbf1f 100644 --- a/pkg/ipam/ipam.go +++ b/pkg/ipam/ipam.go @@ -39,7 +39,7 @@ func NewIPAM() *IPAM { } } -func (ipam *IPAM) GetRandomAddress(podName, nicName string, mac *string, subnetName string, skippedAddrs []string, checkConflict bool) (string, string, string, error) { +func (ipam *IPAM) GetRandomAddress(podName, nicName string, mac *string, subnetName, poolName string, skippedAddrs []string, checkConflict bool) (string, string, string, error) { ipam.mutex.RLock() defer ipam.mutex.RUnlock() @@ -48,14 +48,19 @@ func (ipam *IPAM) GetRandomAddress(podName, nicName string, mac *string, subnetN return "", "", "", ErrNoAvailable } - v4IP, v6IP, macStr, err := subnet.GetRandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) - klog.Infof("allocate v4 %s v6 %s mac %s for %s from subnet %s", v4IP, v6IP, macStr, podName, subnetName) + v4IP, v6IP, macStr, err := subnet.GetRandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) + if poolName == "" { + klog.Infof("allocate v4 %s v6 %s mac %s for %s from subnet %s", v4IP, v6IP, macStr, podName, subnetName) + } else { + klog.Infof("allocate v4 %s v6 %s mac %s for %s from ippool %s in subnet %s", v4IP, v6IP, macStr, podName, poolName, subnetName) + } return v4IP.String(), v6IP.String(), macStr, err } func (ipam *IPAM) GetStaticAddress(podName, nicName, ip string, mac *string, subnetName string, checkConflict bool) (string, string, string, error) { ipam.mutex.RLock() defer ipam.mutex.RUnlock() + if subnet, ok := ipam.Subnets[subnetName]; !ok { return "", "", "", ErrNoAvailable } else { @@ -64,7 +69,11 @@ func (ipam 
*IPAM) GetStaticAddress(podName, nicName, ip string, mac *string, sub var ipAddr IP var macStr string for _, ipStr := range strings.Split(ip, ",") { - ipAddr, macStr, err = subnet.GetStaticAddress(podName, nicName, NewIP(ipStr), mac, false, checkConflict) + ip, err := NewIP(ipStr) + if err != nil { + return "", "", "", err + } + ipAddr, macStr, err = subnet.GetStaticAddress(podName, nicName, ip, mac, false, checkConflict) if err != nil { return "", "", "", err } @@ -78,13 +87,13 @@ func (ipam *IPAM) GetStaticAddress(podName, nicName, ip string, mac *string, sub switch subnet.Protocol { case kubeovnv1.ProtocolIPv4: - klog.Infof("allocate v4 %s mac %s for %s", ip, macStr, podName) + klog.Infof("allocate v4 %s mac %s for %s from subnet %s", ip, macStr, podName, subnetName) return ip, "", macStr, err case kubeovnv1.ProtocolIPv6: - klog.Infof("allocate v6 %s mac %s for %s", ip, macStr, podName) + klog.Infof("allocate v6 %s mac %s for %s from subnet %s", ip, macStr, podName, subnetName) return "", ip, macStr, err case kubeovnv1.ProtocolDual: - klog.Infof("allocate v4 %s v6 %s mac %s for %s", ips[0].String(), ips[1].String(), macStr, podName) + klog.Infof("allocate v4 %s v6 %s mac %s for %s from subnet %s", ips[0].String(), ips[1].String(), macStr, podName, subnetName) return ips[0].String(), ips[1].String(), macStr, err } } @@ -102,10 +111,10 @@ func checkAndAppendIpsForDual(ips []IP, mac, podName, nicName string, subnet *Su var err error if util.CheckProtocol(ips[0].String()) == kubeovnv1.ProtocolIPv4 { newIps = ips - _, ipAddr, _, err = subnet.getV6RandomAddress(podName, nicName, &mac, nil, checkConflict) + _, ipAddr, _, err = subnet.getV6RandomAddress("", podName, nicName, &mac, nil, checkConflict) newIps = append(newIps, ipAddr) } else if util.CheckProtocol(ips[0].String()) == kubeovnv1.ProtocolIPv6 { - ipAddr, _, _, err = subnet.getV4RandomAddress(podName, nicName, &mac, nil, checkConflict) + ipAddr, _, _, err = subnet.getV4RandomAddress("", podName, nicName, &mac, 
nil, checkConflict) newIps = append(newIps, ipAddr) newIps = append(newIps, ips...) } @@ -157,46 +166,107 @@ func (ipam *IPAM) AddOrUpdateSubnet(name, cidrStr, gw string, excludeIps []strin if subnet, ok := ipam.Subnets[name]; ok { subnet.Protocol = protocol - v4Reserved := NewIPRangeListFrom(v4ExcludeIps...) - v6Reserved := NewIPRangeListFrom(v6ExcludeIps...) - if protocol == kubeovnv1.ProtocolDual || protocol == kubeovnv1.ProtocolIPv4 && - (subnet.V4CIDR.String() != v4cidrStr || subnet.V4Gw != v4Gw || !subnet.V4ReservedIPList.Equal(v4Reserved)) { + v4Reserved, err := NewIPRangeListFrom(v4ExcludeIps...) + if err != nil { + return err + } + v6Reserved, err := NewIPRangeListFrom(v6ExcludeIps...) + if err != nil { + return err + } + if (protocol == kubeovnv1.ProtocolDual || protocol == kubeovnv1.ProtocolIPv4) && + (subnet.V4CIDR.String() != v4cidrStr || subnet.V4Gw != v4Gw || !subnet.V4Reserved.Equal(v4Reserved)) { _, cidr, _ := net.ParseCIDR(v4cidrStr) subnet.V4CIDR = cidr - subnet.V4ReservedIPList = v4Reserved + subnet.V4Reserved = v4Reserved firstIP, _ := util.FirstIP(v4cidrStr) lastIP, _ := util.LastIP(v4cidrStr) - subnet.V4FreeIPList = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)).Difference(subnet.V4ReservedIPList) - subnet.V4AvailIPList = subnet.V4FreeIPList.Clone() - subnet.V4ReleasedIPList = NewIPRangeList() + ips, _ := NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) + subnet.V4Free = ips.Separate(subnet.V4Reserved) + subnet.V4Available = subnet.V4Free.Clone() + subnet.V4Using = subnet.V4Using.Intersect(ips) subnet.V4Gw = v4Gw + + pool := subnet.IPPools[""] + pool.V4IPs = ips + pool.V4Free = subnet.V4Free.Clone() + pool.V4Reserved = subnet.V4Reserved.Clone() + pool.V4Released = NewIPRangeList() + pool.V4Using = subnet.V4Using.Clone() + + for name, p := range subnet.IPPools { + if name == "" { + continue + } + p.V4Free = ips.Intersect(p.V4IPs) + p.V4Reserved = subnet.V4Reserved.Intersect(p.V4IPs) + p.V4Available = p.V4Free.Clone() + 
p.V4Released = NewIPRangeList() + pool.V4Free = pool.V4Free.Separate(p.V4IPs) + pool.V4Reserved = p.V4Reserved.Separate(p.V4Reserved) + } + pool.V4Available = pool.V4Free.Clone() + for nicName, ip := range subnet.V4NicToIP { - mac := subnet.NicToMac[nicName] - podName := subnet.V4IPToPod[ip.String()] - if _, _, err := subnet.GetStaticAddress(podName, nicName, ip, &mac, true, true); err != nil { - klog.Errorf("%s address not in subnet %s new cidr %s: %v", podName, name, cidrStr, err) + if !ips.Contains(ip) { + podName := subnet.V4IPToPod[ip.String()] + klog.Errorf("%s address %s not in subnet %s new cidr %s", podName, ip.String(), name, cidrStr) + delete(subnet.V4NicToIP, nicName) + delete(subnet.V4IPToPod, ip.String()) + continue } } } - if protocol == kubeovnv1.ProtocolDual || protocol == kubeovnv1.ProtocolIPv6 && - (subnet.V6CIDR.String() != v6cidrStr || subnet.V6Gw != v6Gw || !subnet.V6ReservedIPList.Equal(v6Reserved)) { + if (protocol == kubeovnv1.ProtocolDual || protocol == kubeovnv1.ProtocolIPv6) && + (subnet.V6CIDR.String() != v6cidrStr || subnet.V6Gw != v6Gw || !subnet.V6Reserved.Equal(v6Reserved)) { _, cidr, _ := net.ParseCIDR(v6cidrStr) subnet.V6CIDR = cidr - subnet.V6ReservedIPList = v6Reserved + subnet.V6Reserved = v6Reserved firstIP, _ := util.FirstIP(v6cidrStr) lastIP, _ := util.LastIP(v6cidrStr) - subnet.V6FreeIPList = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)).Difference(subnet.V6ReservedIPList) - subnet.V6AvailIPList = subnet.V6FreeIPList.Clone() - subnet.V6ReleasedIPList = NewIPRangeList() + ips, _ := NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) + subnet.V6Free = ips.Separate(subnet.V6Reserved) + subnet.V6Available = subnet.V6Free.Clone() + subnet.V6Using = subnet.V6Using.Intersect(ips) subnet.V6Gw = v6Gw + + pool := subnet.IPPools[""] + pool.V6IPs = ips + pool.V6Free = subnet.V6Free.Clone() + pool.V6Reserved = subnet.V6Reserved.Clone() + pool.V6Released = NewIPRangeList() + pool.V6Using = subnet.V6Using.Clone() + + 
for name, p := range subnet.IPPools { + if name == "" { + continue + } + p.V6Free = ips.Intersect(p.V6IPs) + p.V6Reserved = subnet.V6Reserved.Intersect(p.V6IPs) + p.V6Available = p.V6Free.Clone() + p.V6Released = NewIPRangeList() + pool.V6Free = pool.V6Free.Separate(p.V6IPs) + pool.V6Reserved = p.V6Reserved.Separate(p.V6Reserved) + } + pool.V6Available = pool.V6Free.Clone() + for nicName, ip := range subnet.V6NicToIP { - mac := subnet.NicToMac[nicName] - podName := subnet.V6IPToPod[ip.String()] - if _, _, err := subnet.GetStaticAddress(podName, nicName, ip, &mac, true, true); err != nil { - klog.Errorf("%s address not in subnet %s new cidr %s: %v", podName, name, cidrStr, err) + if !ips.Contains(ip) { + podName := subnet.V6IPToPod[ip.String()] + klog.Errorf("%s address %s not in subnet %s new cidr %s", podName, ip.String(), name, cidrStr) + delete(subnet.V6NicToIP, nicName) + delete(subnet.V6IPToPod, ip.String()) + continue } } } + + for nicName, mac := range subnet.NicToMac { + if subnet.V4NicToIP[nicName] == nil && subnet.V6NicToIP[nicName] == nil { + delete(subnet.NicToMac, nicName) + delete(subnet.MacToPod, mac) + } + } return nil } @@ -244,8 +314,14 @@ func (ipam *IPAM) GetPodAddress(podName string) []*SubnetAddress { func (ipam *IPAM) ContainAddress(address string) bool { ipam.mutex.RLock() defer ipam.mutex.RUnlock() + ip, err := NewIP(address) + if ip == nil { + klog.Error(err) + return false + } + for _, subnet := range ipam.Subnets { - if subnet.ContainAddress(NewIP(address)) { + if subnet.ContainAddress(ip) { return true } } @@ -278,11 +354,45 @@ func (ipam *IPAM) GetSubnetIPRangeString(subnetName string) (string, string, str var v4UsingIPStr, v6UsingIPStr, v4AvailableIPStr, v6AvailableIPStr string if subnet, ok := ipam.Subnets[subnetName]; ok { - v4UsingIPStr = subnet.V4UsingIPList.String() - v6UsingIPStr = subnet.V6UsingIPList.String() - v4AvailableIPStr = subnet.V4AvailIPList.String() - v6AvailableIPStr = subnet.V6AvailIPList.String() + v4UsingIPStr = 
subnet.V4Using.String() + v6UsingIPStr = subnet.V6Using.String() + v4AvailableIPStr = subnet.V4Available.String() + v6AvailableIPStr = subnet.V6Available.String() } return v4UsingIPStr, v6UsingIPStr, v4AvailableIPStr, v6AvailableIPStr } + +func (ipam *IPAM) AddOrUpdateIPPool(subnet, ippool string, ips []string) error { + ipam.mutex.RLock() + defer ipam.mutex.RUnlock() + + s := ipam.Subnets[subnet] + if s == nil { + return fmt.Errorf("subnet %s does not exist in IPAM", subnet) + } + + return s.AddOrUpdateIPPool(ippool, ips) +} + +func (ipam *IPAM) RemoveIPPool(subnet, ippool string) { + ipam.mutex.RLock() + if s := ipam.Subnets[subnet]; s != nil { + s.RemoveIPPool(ippool) + } + ipam.mutex.RUnlock() +} + +func (ipam *IPAM) IPPoolStatistics(subnet, ippool string) ( + v4Available, v4Using, v6Available, v6Using float64, + v4AvailableRange, v4UsingRange, v6AvailableRange, v6UsingRange string, +) { + ipam.mutex.RLock() + defer ipam.mutex.RUnlock() + + s := ipam.Subnets[subnet] + if s == nil { + return + } + return s.IPPoolStatistics(ippool) +} diff --git a/pkg/ipam/ippool.go b/pkg/ipam/ippool.go new file mode 100644 index 00000000000..e6a5d35dbe3 --- /dev/null +++ b/pkg/ipam/ippool.go @@ -0,0 +1,16 @@ +package ipam + +type IPPool struct { + V4IPs *IPRangeList + V4Free *IPRangeList + V4Available *IPRangeList + V4Reserved *IPRangeList + V4Released *IPRangeList + V4Using *IPRangeList + V6IPs *IPRangeList + V6Free *IPRangeList + V6Available *IPRangeList + V6Reserved *IPRangeList + V6Released *IPRangeList + V6Using *IPRangeList +} diff --git a/pkg/ipam/subnet.go b/pkg/ipam/subnet.go index ce2e40cdac1..dcc83f976c0 100644 --- a/pkg/ipam/subnet.go +++ b/pkg/ipam/subnet.go @@ -13,30 +13,31 @@ import ( ) type Subnet struct { - Name string - mutex sync.RWMutex - Protocol string - V4CIDR *net.IPNet - V4FreeIPList *IPRangeList - V4ReleasedIPList *IPRangeList - V4ReservedIPList *IPRangeList - V4AvailIPList *IPRangeList - V4UsingIPList *IPRangeList - V4NicToIP map[string]IP - V4IPToPod 
map[string]string - V6CIDR *net.IPNet - V6FreeIPList *IPRangeList - V6ReleasedIPList *IPRangeList - V6ReservedIPList *IPRangeList - V6AvailIPList *IPRangeList - V6UsingIPList *IPRangeList - V6NicToIP map[string]IP - V6IPToPod map[string]string - NicToMac map[string]string - MacToPod map[string]string - PodToNicList map[string][]string - V4Gw string - V6Gw string + Name string + mutex sync.RWMutex + CIDR string + Protocol string + V4CIDR *net.IPNet + V4Free *IPRangeList + V4Reserved *IPRangeList + V4Available *IPRangeList + V4Using *IPRangeList + V4NicToIP map[string]IP + V4IPToPod map[string]string + V6CIDR *net.IPNet + V6Free *IPRangeList + V6Reserved *IPRangeList + V6Available *IPRangeList + V6Using *IPRangeList + V6NicToIP map[string]IP + V6IPToPod map[string]string + NicToMac map[string]string + MacToPod map[string]string + PodToNicList map[string][]string + V4Gw string + V6Gw string + + IPPools map[string]*IPPool } func NewSubnet(name, cidrStr string, excludeIps []string) (*Subnet, error) { @@ -52,38 +53,45 @@ func NewSubnet(name, cidrStr string, excludeIps []string) (*Subnet, error) { // subnet.Spec.ExcludeIps contains both v4 and v6 addresses excludeIps = util.ExpandExcludeIPs(excludeIps, cidrStr) v4ExcludeIps, v6ExcludeIps := util.SplitIpsByProtocol(excludeIps) + v4Reserved, err := NewIPRangeListFrom(v4ExcludeIps...) + if err != nil { + return nil, err + } + v6Reserved, err := NewIPRangeListFrom(v6ExcludeIps...) 
+ if err != nil { + return nil, err + } protocol := util.CheckProtocol(cidrStr) subnet := &Subnet{ - Name: name, - mutex: sync.RWMutex{}, - Protocol: protocol, - V4FreeIPList: NewIPRangeList(), - V6FreeIPList: NewIPRangeList(), - V4ReservedIPList: NewIPRangeListFrom(v4ExcludeIps...), - V6ReservedIPList: NewIPRangeListFrom(v6ExcludeIps...), - V4ReleasedIPList: NewIPRangeList(), - V6ReleasedIPList: NewIPRangeList(), - V4UsingIPList: NewIPRangeList(), - V6UsingIPList: NewIPRangeList(), - V4NicToIP: map[string]IP{}, - V6NicToIP: map[string]IP{}, - V4IPToPod: map[string]string{}, - V6IPToPod: map[string]string{}, - MacToPod: map[string]string{}, - NicToMac: map[string]string{}, - PodToNicList: map[string][]string{}, + Name: name, + CIDR: cidrStr, + Protocol: protocol, + V4Free: NewIPRangeList(), + V6Free: NewIPRangeList(), + V4Reserved: v4Reserved, + V6Reserved: v6Reserved, + V4Using: NewIPRangeList(), + V6Using: NewIPRangeList(), + V4NicToIP: map[string]IP{}, + V6NicToIP: map[string]IP{}, + V4IPToPod: map[string]string{}, + V6IPToPod: map[string]string{}, + MacToPod: map[string]string{}, + NicToMac: map[string]string{}, + PodToNicList: map[string][]string{}, + IPPools: make(map[string]*IPPool, 0), } if protocol == kubeovnv1.ProtocolIPv4 { firstIP, _ := util.FirstIP(cidrStr) lastIP, _ := util.LastIP(cidrStr) subnet.V4CIDR = cidrs[0] - subnet.V4FreeIPList = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) + subnet.V4Free, _ = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) } else if protocol == kubeovnv1.ProtocolIPv6 { firstIP, _ := util.FirstIP(cidrStr) lastIP, _ := util.LastIP(cidrStr) subnet.V6CIDR = cidrs[0] - subnet.V6FreeIPList = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) + subnet.V6Free, _ = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) } else { subnet.V4CIDR = cidrs[0] subnet.V6CIDR = cidrs[1] @@ -92,13 +100,29 @@ func NewSubnet(name, cidrStr string, excludeIps []string) (*Subnet, error) { v4LastIP, _ := 
util.LastIP(cidrBlocks[0]) v6FirstIP, _ := util.FirstIP(cidrBlocks[1]) v6LastIP, _ := util.LastIP(cidrBlocks[1]) - subnet.V4FreeIPList = NewIPRangeListFrom(fmt.Sprintf("%s..%s", v4FirstIP, v4LastIP)) - subnet.V6FreeIPList = NewIPRangeListFrom(fmt.Sprintf("%s..%s", v6FirstIP, v6LastIP)) - } - subnet.V4FreeIPList = subnet.V4FreeIPList.Difference(subnet.V4ReservedIPList) - subnet.V6FreeIPList = subnet.V6FreeIPList.Difference(subnet.V6ReservedIPList) - subnet.V4AvailIPList = subnet.V4FreeIPList.Clone() - subnet.V6AvailIPList = subnet.V6FreeIPList.Clone() + subnet.V4Free, _ = NewIPRangeListFrom(fmt.Sprintf("%s..%s", v4FirstIP, v4LastIP)) + subnet.V6Free, _ = NewIPRangeListFrom(fmt.Sprintf("%s..%s", v6FirstIP, v6LastIP)) + } + + pool := &IPPool{ + V4IPs: subnet.V4Free.Clone(), + V6IPs: subnet.V6Free.Clone(), + V4Released: NewIPRangeList(), + V6Released: NewIPRangeList(), + V4Using: NewIPRangeList(), + V6Using: NewIPRangeList(), + } + subnet.V4Free = subnet.V4Free.Separate(subnet.V4Reserved) + subnet.V6Free = subnet.V6Free.Separate(subnet.V6Reserved) + subnet.V4Available = subnet.V4Free.Clone() + subnet.V6Available = subnet.V6Free.Clone() + pool.V4Free = subnet.V4Free.Clone() + pool.V6Free = subnet.V6Free.Clone() + pool.V4Available = subnet.V4Available.Clone() + pool.V6Available = subnet.V6Available.Clone() + pool.V4Reserved = subnet.V4Reserved.Clone() + pool.V6Reserved = subnet.V6Reserved.Clone() + subnet.IPPools = map[string]*IPPool{"": pool} return subnet, nil } @@ -144,7 +168,7 @@ func (subnet *Subnet) popPodNic(podName, nicName string) { } } -func (subnet *Subnet) GetRandomAddress(podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { +func (subnet *Subnet) GetRandomAddress(poolName, podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { subnet.mutex.Lock() defer func() { subnet.pushPodNic(podName, nicName) @@ -152,33 +176,33 @@ func (subnet *Subnet) 
GetRandomAddress(podName, nicName string, mac *string, ski }() if subnet.Protocol == kubeovnv1.ProtocolDual { - return subnet.getDualRandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) + return subnet.getDualRandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) } else if subnet.Protocol == kubeovnv1.ProtocolIPv4 { - return subnet.getV4RandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) + return subnet.getV4RandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) } else { - return subnet.getV6RandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) + return subnet.getV6RandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) } } -func (subnet *Subnet) getDualRandomAddress(podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { - v4IP, _, _, err := subnet.getV4RandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) +func (subnet *Subnet) getDualRandomAddress(poolName, podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { + v4IP, _, _, err := subnet.getV4RandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) if err != nil { return nil, nil, "", err } - _, v6IP, macStr, err := subnet.getV6RandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) + _, v6IP, macStr, err := subnet.getV6RandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) if err != nil { return nil, nil, "", err } // allocated IPv4 address may be released in getV6RandomAddress() if !subnet.V4NicToIP[nicName].Equal(v4IP) { - v4IP, _, _, _ = subnet.getV4RandomAddress(podName, nicName, mac, skippedAddrs, checkConflict) + v4IP, _, _, _ = subnet.getV4RandomAddress(poolName, podName, nicName, mac, skippedAddrs, checkConflict) } return v4IP, v6IP, macStr, nil } -func (subnet *Subnet) getV4RandomAddress(podName, nicName string, mac *string, skippedAddrs 
[]string, checkConflict bool) (IP, IP, string, error) { +func (subnet *Subnet) getV4RandomAddress(ippoolName, podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { // After 'macAdd' introduced to support only static mac address, pod restart will run into error mac AddressConflict // controller will re-enqueue the new pod then wait for old pod deleted and address released. // here will return only if both ip and mac exist, otherwise only ip without mac returned will trigger CreatePort error. @@ -189,25 +213,35 @@ func (subnet *Subnet) getV4RandomAddress(podName, nicName string, mac *string, s subnet.releaseAddr(podName, nicName) } - if subnet.V4FreeIPList.Len() == 0 { - if subnet.V4ReleasedIPList.Len() == 0 { + pool := subnet.IPPools[ippoolName] + if pool == nil { + return nil, nil, "", ErrNoAvailable + } + + if pool.V4Free.Len() == 0 { + if pool.V4Released.Len() == 0 { return nil, nil, "", ErrNoAvailable } - subnet.V4FreeIPList = subnet.V4ReleasedIPList - subnet.V4ReleasedIPList = NewIPRangeList() + pool.V4Free = pool.V4Released + pool.V4Released = NewIPRangeList() } skipped := make([]IP, 0, len(skippedAddrs)) for _, s := range skippedAddrs { - skipped = append(skipped, NewIP(s)) + if ip, _ := NewIP(s); ip != nil { + skipped = append(skipped, ip) + } } - ip := subnet.V4FreeIPList.Allocate(skipped) + ip := pool.V4Free.Allocate(skipped) if ip == nil { return nil, nil, "", ErrConflict } - subnet.V4AvailIPList.Remove(ip) - subnet.V4UsingIPList.Add(ip) + pool.V4Available.Remove(ip) + pool.V4Using.Add(ip) + subnet.V4Free.Remove(ip) + subnet.V4Available.Remove(ip) + subnet.V4Using.Add(ip) subnet.V4NicToIP[nicName] = ip subnet.V4IPToPod[ip.String()] = podName @@ -222,7 +256,7 @@ func (subnet *Subnet) getV4RandomAddress(podName, nicName string, mac *string, s } } -func (subnet *Subnet) getV6RandomAddress(podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { +func 
(subnet *Subnet) getV6RandomAddress(ippoolName, podName, nicName string, mac *string, skippedAddrs []string, checkConflict bool) (IP, IP, string, error) { // After 'macAdd' introduced to support only static mac address, pod restart will run into error mac AddressConflict // controller will re-enqueue the new pod then wait for old pod deleted and address released. // here will return only if both ip and mac exist, otherwise only ip without mac returned will trigger CreatePort error. @@ -233,25 +267,35 @@ func (subnet *Subnet) getV6RandomAddress(podName, nicName string, mac *string, s subnet.releaseAddr(podName, nicName) } - if subnet.V6FreeIPList.Len() == 0 { - if subnet.V6ReleasedIPList.Len() == 0 { + pool := subnet.IPPools[ippoolName] + if pool == nil { + return nil, nil, "", ErrNoAvailable + } + + if pool.V6Free.Len() == 0 { + if pool.V6Released.Len() == 0 { return nil, nil, "", ErrNoAvailable } - subnet.V6FreeIPList = subnet.V6ReleasedIPList - subnet.V6ReleasedIPList = NewIPRangeList() + pool.V6Free = pool.V6Released + pool.V6Released = NewIPRangeList() } skipped := make([]IP, 0, len(skippedAddrs)) for _, s := range skippedAddrs { - skipped = append(skipped, NewIP(s)) + if ip, _ := NewIP(s); ip != nil { + skipped = append(skipped, ip) + } } - ip := subnet.V6FreeIPList.Allocate(skipped) + ip := pool.V6Free.Allocate(skipped) if ip == nil { return nil, nil, "", ErrConflict } - subnet.V6AvailIPList.Remove(ip) - subnet.V6UsingIPList.Add(ip) + pool.V6Available.Remove(ip) + pool.V6Using.Add(ip) + subnet.V6Free.Remove(ip) + subnet.V6Available.Remove(ip) + subnet.V6Using.Add(ip) subnet.V6NicToIP[nicName] = ip subnet.V6IPToPod[ip.String()] = podName @@ -270,20 +314,6 @@ func (subnet *Subnet) GetStaticAddress(podName, nicName string, ip IP, mac *stri var v4, v6 bool isAllocated := false subnet.mutex.Lock() - defer func() { - subnet.pushPodNic(podName, nicName) - if isAllocated { - if v4 { - subnet.V4AvailIPList.Remove(ip) - subnet.V4UsingIPList.Add(ip) - } - if v6 { - 
subnet.V6AvailIPList.Remove(ip) - subnet.V6UsingIPList.Add(ip) - } - } - subnet.mutex.Unlock() - }() if ip.To4() != nil { v4 = subnet.V4CIDR != nil @@ -293,11 +323,41 @@ func (subnet *Subnet) GetStaticAddress(podName, nicName string, ip IP, mac *stri if v4 && !subnet.V4CIDR.Contains(net.IP(ip)) { return ip, "", ErrOutOfRange } - if v6 && !subnet.V6CIDR.Contains(net.IP(ip)) { return ip, "", ErrOutOfRange } + var pool *IPPool + for _, p := range subnet.IPPools { + if v4 && p.V4IPs.Contains(ip) { + pool = p + break + } + if v6 && p.V6IPs.Contains(ip) { + pool = p + break + } + } + + defer func() { + subnet.pushPodNic(podName, nicName) + if isAllocated { + if v4 { + subnet.V4Available.Remove(ip) + subnet.V4Using.Add(ip) + pool.V4Available.Remove(ip) + pool.V4Using.Add(ip) + } + if v6 { + subnet.V6Available.Remove(ip) + subnet.V6Using.Add(ip) + pool.V6Available.Remove(ip) + pool.V6Using.Add(ip) + } + } + subnet.mutex.Unlock() + }() + var macStr string if mac == nil { if m, ok := subnet.NicToMac[nicName]; ok { @@ -329,18 +389,19 @@ func (subnet *Subnet) GetStaticAddress(podName, nicName string, ip IP, mac *stri } } - if subnet.V4ReservedIPList.Contains(ip) { + if pool.V4Reserved.Contains(ip) { subnet.V4NicToIP[nicName] = ip subnet.V4IPToPod[ip.String()] = podName return ip, macStr, nil } - if subnet.V4FreeIPList.Remove(ip) { + if pool.V4Free.Remove(ip) { + subnet.V4Free.Remove(ip) subnet.V4NicToIP[nicName] = ip subnet.V4IPToPod[ip.String()] = podName isAllocated = true return ip, macStr, nil - } else if subnet.V4ReleasedIPList.Remove(ip) { + } else if pool.V4Released.Remove(ip) { subnet.V4NicToIP[nicName] = ip subnet.V4IPToPod[ip.String()] = podName isAllocated = true @@ -363,18 +424,19 @@ func (subnet *Subnet) GetStaticAddress(podName, nicName string, ip IP, mac *stri } } - if subnet.V6ReservedIPList.Contains(ip) { + if pool.V6Reserved.Contains(ip) { subnet.V6NicToIP[nicName] = ip subnet.V6IPToPod[ip.String()] = podName return ip, macStr, nil } - if 
subnet.V6FreeIPList.Remove(ip) { + if pool.V6Free.Remove(ip) { + subnet.V6Free.Remove(ip) subnet.V6NicToIP[nicName] = ip subnet.V6IPToPod[ip.String()] = podName isAllocated = true return ip, macStr, nil - } else if subnet.V6ReleasedIPList.Remove(ip) { + } else if pool.V6Released.Remove(ip) { subnet.V6NicToIP[nicName] = ip subnet.V6IPToPod[ip.String()] = podName isAllocated = true @@ -404,23 +466,28 @@ func (subnet *Subnet) releaseAddr(podName, nicName string) { // When CIDR changed, do not relocate ip to CIDR list if !subnet.V4CIDR.Contains(net.IP(ip)) { // Continue to release IPv6 address - klog.Infof("release v4 %s mac %s for %s, ignore ip", ip, mac, podName) + klog.Infof("release v4 %s mac %s from subnet %s for %s, ignore ip", ip, mac, subnet.Name, podName) changed = true } - if subnet.V4ReservedIPList.Contains(ip) { - klog.Infof("release v4 %s mac %s for %s, ip is in reserved list", ip, mac, podName) + if subnet.V4Reserved.Contains(ip) { + klog.Infof("release v4 %s mac %s from subnet %s for %s, ip is in reserved list", ip, mac, subnet.Name, podName) changed = true } - if !changed { - if subnet.V4ReleasedIPList.Add(ip) { - klog.Infof("release v4 %s mac %s for %s, add ip to released list", ip, mac, podName) + subnet.V4Available.Add(ip) + subnet.V4Using.Remove(ip) + for _, pool := range subnet.IPPools { + if pool.V4Using.Remove(ip) { + pool.V4Available.Add(ip) + if !changed { + if pool.V4Released.Add(ip) { + klog.Infof("release v4 %s mac %s from subnet %s for %s, add ip to released list", ip, mac, subnet.Name, podName) + } + } + break } } - - subnet.V4AvailIPList.Add(ip) - subnet.V4UsingIPList.Remove(ip) } } if ip, ok = subnet.V6NicToIP[nicName]; ok { @@ -438,23 +505,28 @@ func (subnet *Subnet) releaseAddr(podName, nicName string) { changed = false // When CIDR changed, do not relocate ip to CIDR list if !subnet.V6CIDR.Contains(net.IP(ip)) { - klog.Infof("release v6 %s mac %s for %s, ignore ip", ip, mac, podName) + klog.Infof("release v6 %s mac %s from subnet %s 
for %s, ignore ip", ip, mac, subnet.Name, podName) changed = true } - if subnet.V6ReservedIPList.Contains(ip) { - klog.Infof("release v6 %s mac %s for %s, ip is in reserved list", ip, mac, podName) + if subnet.V6Reserved.Contains(ip) { + klog.Infof("release v6 %s mac %s from subnet %s for %s, ip is in reserved list", ip, mac, subnet.Name, podName) changed = true } - if !changed { - if subnet.V6ReleasedIPList.Add(ip) { - klog.Infof("release v6 %s mac %s for %s, add ip to released list", ip, mac, podName) + subnet.V6Available.Add(ip) + subnet.V6Using.Remove(ip) + for _, pool := range subnet.IPPools { + if pool.V6Using.Remove(ip) { + pool.V6Available.Add(ip) + if !changed { + if pool.V6Released.Add(ip) { + klog.Infof("release v6 %s mac %s from subnet %s for %s, add ip to released list", ip, mac, subnet.Name, podName) + } + } + break } } - - subnet.V6AvailIPList.Add(ip) - subnet.V6UsingIPList.Remove(ip) } } } @@ -519,3 +591,146 @@ func (subnet *Subnet) isIPAssignedToOtherPod(ip, podName string) (string, bool) } return "", false } + +func (s *Subnet) AddOrUpdateIPPool(name string, ips []string) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + pool := &IPPool{ + V4IPs: NewIPRangeList(), + V6IPs: NewIPRangeList(), + V4Free: NewIPRangeList(), + V6Free: NewIPRangeList(), + V4Available: NewIPRangeList(), + V6Available: NewIPRangeList(), + V4Reserved: NewIPRangeList(), + V6Reserved: NewIPRangeList(), + V4Released: NewIPRangeList(), + V6Released: NewIPRangeList(), + V4Using: NewIPRangeList(), + V6Using: NewIPRangeList(), + } + + var err error + v4IPs, v6IPs := util.SplitIpsByProtocol(ips) + if s.V4CIDR != nil { + if pool.V4IPs, err = NewIPRangeListFrom(v4IPs...); err != nil { + return err + } + for k, v := range s.IPPools { + if k == "" || k == name { + continue + } + if r := pool.V4IPs.Intersect(v.V4IPs); r.Len() != 0 { + return fmt.Errorf("ippool %s has conflict IPs with ippool %s: %s", name, k, r.String()) + } + } + + firstIP, _ := util.FirstIP(s.V4CIDR.String()) + 
lastIP, _ := util.LastIP(s.V4CIDR.String()) + pool.V4Reserved = s.V4Reserved.Intersect(pool.V4IPs) + pool.V4Using = s.V4Using.Intersect(pool.V4IPs) + pool.V4Free, _ = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) + pool.V4Free = pool.V4Free.Intersect(pool.V4IPs).Separate(pool.V4Using).Separate(pool.V4Reserved) + } + if s.V6CIDR != nil { + if pool.V6IPs, err = NewIPRangeListFrom(v6IPs...); err != nil { + return err + } + for k, v := range s.IPPools { + if k == "" || k == name { + continue + } + if r := pool.V6IPs.Intersect(v.V6IPs); r.Len() != 0 { + return fmt.Errorf("ippool %s has conflict IPs with ippool %s: %s", name, k, r.String()) + } + } + + firstIP, _ := util.FirstIP(s.V6CIDR.String()) + lastIP, _ := util.LastIP(s.V6CIDR.String()) + pool.V6Reserved = s.V6Reserved.Intersect(pool.V6IPs) + pool.V6Using = s.V6Using.Intersect(pool.V6IPs) + pool.V6Free, _ = NewIPRangeListFrom(fmt.Sprintf("%s..%s", firstIP, lastIP)) + pool.V6Free = pool.V6Free.Intersect(pool.V6IPs).Separate(pool.V6Using).Separate(pool.V6Reserved) + } + + defaultPool := s.IPPools[""] + if p := s.IPPools[name]; p != nil { + defaultPool.V4IPs = defaultPool.V4IPs.Merge(p.V4IPs).Separate(pool.V4IPs) + defaultPool.V6IPs = defaultPool.V6IPs.Merge(p.V6IPs).Separate(pool.V6IPs) + defaultPool.V4Free = defaultPool.V4Available.Merge(p.V4Available).Separate(pool.V4Free) + defaultPool.V6Free = defaultPool.V6Available.Merge(p.V6Available).Separate(pool.V6Free) + defaultPool.V4Using = defaultPool.V4Using.Merge(p.V4Using).Separate(pool.V4Using) + defaultPool.V6Using = defaultPool.V6Using.Merge(p.V6Using).Separate(pool.V6Using) + defaultPool.V4Reserved = defaultPool.V4Reserved.Merge(p.V4Reserved).Separate(pool.V4Reserved) + defaultPool.V6Reserved = defaultPool.V6Reserved.Merge(p.V6Reserved).Separate(pool.V6Reserved) + defaultPool.V4Available = defaultPool.V4Free.Clone() + defaultPool.V6Available = defaultPool.V6Free.Clone() + } else { + defaultPool.V4IPs = defaultPool.V4IPs.Separate(pool.V4IPs) + 
defaultPool.V6IPs = defaultPool.V6IPs.Separate(pool.V6IPs) + defaultPool.V4Free = defaultPool.V4Available.Separate(pool.V4Free) + defaultPool.V6Free = defaultPool.V6Available.Separate(pool.V6Free) + defaultPool.V4Using = defaultPool.V4Using.Separate(pool.V4Using) + defaultPool.V6Using = defaultPool.V6Using.Separate(pool.V6Using) + defaultPool.V4Reserved = defaultPool.V4Reserved.Separate(pool.V4Reserved) + defaultPool.V6Reserved = defaultPool.V6Reserved.Separate(pool.V6Reserved) + defaultPool.V4Available = defaultPool.V4Free.Clone() + defaultPool.V6Available = defaultPool.V6Free.Clone() + } + defaultPool.V4Released = NewIPRangeList() + defaultPool.V6Released = NewIPRangeList() + pool.V4Available = pool.V4Free.Clone() + pool.V6Available = pool.V6Free.Clone() + s.IPPools[name] = pool + + return nil +} + +func (s *Subnet) RemoveIPPool(name string) { + s.mutex.Lock() + defer s.mutex.Unlock() + + p := s.IPPools[name] + if p == nil { + return + } + + defaultPool := s.IPPools[""] + defaultPool.V4Free = defaultPool.V4Free.Merge(p.V4Free) + defaultPool.V6Free = defaultPool.V6Free.Merge(p.V6Free) + defaultPool.V4Available = defaultPool.V4Available.Merge(p.V4Available) + defaultPool.V6Available = defaultPool.V6Available.Merge(p.V6Available) + defaultPool.V4Using = defaultPool.V4Using.Merge(p.V4Using) + defaultPool.V6Using = defaultPool.V6Using.Merge(p.V6Using) + defaultPool.V4Reserved = defaultPool.V4Reserved.Merge(p.V4Reserved) + defaultPool.V6Reserved = defaultPool.V6Reserved.Merge(p.V6Reserved) + defaultPool.V4Released = defaultPool.V4Released.Merge(p.V4Released) + defaultPool.V6Released = defaultPool.V6Released.Merge(p.V6Released) + + delete(s.IPPools, name) +} + +func (s *Subnet) IPPoolStatistics(ippool string) ( + v4Available, v4Using, v6Available, v6Using float64, + v4AvailableRange, v4UsingRange, v6AvailableRange, v6UsingRange string, +) { + s.mutex.Lock() + defer s.mutex.Unlock() + + p := s.IPPools[ippool] + if p == nil { + return + } + + v4Available = 
p.V4Available.Count() + v6Available = p.V6Available.Count() + v4Using = p.V4Using.Count() + v6Using = p.V6Using.Count() + v4AvailableRange = p.V4Available.String() + v6AvailableRange = p.V6Available.String() + v4UsingRange = p.V4Using.String() + v6UsingRange = p.V6Using.String() + + return +} diff --git a/pkg/util/validator.go b/pkg/util/validator.go index c8656d16d3f..6af7a22495b 100644 --- a/pkg/util/validator.go +++ b/pkg/util/validator.go @@ -227,17 +227,19 @@ func ValidatePodNetwork(annotations map[string]string) error { ipPool := annotations[IpPoolAnnotation] if ipPool != "" { - for _, ips := range strings.Split(ipPool, ";") { - if cidrStr := annotations[CidrAnnotation]; cidrStr != "" { - if !CIDRContainIP(cidrStr, ips) { - errors = append(errors, fmt.Errorf("%s not in cidr %s", ips, cidrStr)) - continue + if strings.ContainsRune(ipPool, ';') || strings.ContainsRune(ipPool, ',') || net.ParseIP(ipPool) != nil { + for _, ips := range strings.Split(ipPool, ";") { + if cidrStr := annotations[CidrAnnotation]; cidrStr != "" { + if !CIDRContainIP(cidrStr, ips) { + errors = append(errors, fmt.Errorf("%s not in cidr %s", ips, cidrStr)) + continue + } } - } - for _, ip := range strings.Split(ips, ",") { - if net.ParseIP(strings.TrimSpace(ip)) == nil { - errors = append(errors, fmt.Errorf("%s in %s is not a valid address", ip, IpPoolAnnotation)) + for _, ip := range strings.Split(ips, ",") { + if net.ParseIP(strings.TrimSpace(ip)) == nil { + errors = append(errors, fmt.Errorf("%s in %s is not a valid address", ip, IpPoolAnnotation)) + } } } } diff --git a/test/e2e/kube-ovn/subnet/subnet.go b/test/e2e/kube-ovn/subnet/subnet.go index 320dcbe53d3..bab32ecd674 100644 --- a/test/e2e/kube-ovn/subnet/subnet.go +++ b/test/e2e/kube-ovn/subnet/subnet.go @@ -716,20 +716,42 @@ var _ = framework.Describe("[group:subnet]", func() { podClient.WaitForRunning(podName) } - subnet = subnetClient.Get(subnetName) - if cidrV4 != "" { - v4UsingIPEnd := 
util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(startIPv4), big.NewInt(int64(podCount-1)))) - v4AvailableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(v4UsingIPEnd), big.NewInt(1))) - framework.ExpectEqual(subnet.Status.V4UsingIPRange, fmt.Sprintf("%s-%s", startIPv4, v4UsingIPEnd)) - framework.ExpectEqual(subnet.Status.V4AvailableIPRange, fmt.Sprintf("%s-%s", v4AvailableIPStart, lastIPv4)) - } - - if cidrV6 != "" { - v6UsingIPEnd := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(startIPv6), big.NewInt(int64(podCount-1)))) - v6AvailableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(v6UsingIPEnd), big.NewInt(1))) - framework.ExpectEqual(subnet.Status.V6UsingIPRange, fmt.Sprintf("%s-%s", startIPv6, v6UsingIPEnd)) - framework.ExpectEqual(subnet.Status.V6AvailableIPRange, fmt.Sprintf("%s-%s", v6AvailableIPStart, lastIPv6)) - } + framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { + subnet = subnetClient.Get(subnetName) + if cidrV4 != "" { + v4UsingIPEnd := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(startIPv4), big.NewInt(int64(podCount-1)))) + v4AvailableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(v4UsingIPEnd), big.NewInt(1))) + framework.Logf("V4UsingIPRange: expected %q, current %q", + fmt.Sprintf("%s-%s", startIPv4, v4UsingIPEnd), + subnet.Status.V4UsingIPRange, + ) + framework.Logf("V4AvailableIPRange: expected %q, current %q", + fmt.Sprintf("%s-%s", v4AvailableIPStart, lastIPv4), + subnet.Status.V4AvailableIPRange, + ) + if subnet.Status.V4UsingIPRange != fmt.Sprintf("%s-%s", startIPv4, v4UsingIPEnd) || + subnet.Status.V4AvailableIPRange != fmt.Sprintf("%s-%s", v4AvailableIPStart, lastIPv4) { + return false, nil + } + } + if cidrV6 != "" { + v6UsingIPEnd := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(startIPv6), big.NewInt(int64(podCount-1)))) + v6AvailableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(v6UsingIPEnd), big.NewInt(1))) + 
framework.Logf("V6UsingIPRange: expected %q, current %q", + fmt.Sprintf("%s-%s", startIPv6, v6UsingIPEnd), + subnet.Status.V6UsingIPRange, + ) + framework.Logf("V6AvailableIPRange: expected %q, current %q", + fmt.Sprintf("%s-%s", v6AvailableIPStart, lastIPv6), + subnet.Status.V6AvailableIPRange, + ) + if subnet.Status.V6UsingIPRange != fmt.Sprintf("%s-%s", startIPv6, v6UsingIPEnd) || + subnet.Status.V6AvailableIPRange != fmt.Sprintf("%s-%s", v6AvailableIPStart, lastIPv6) { + return false, nil + } + } + return true, nil + }, "") for i := 1; i <= podCount; i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) @@ -861,24 +883,30 @@ var _ = framework.Describe("[group:subnet]", func() { deploy := framework.MakeDeployment(deployName, int32(replicas), labels, annotations, "pause", framework.PauseImage, "") deploy = deployClient.CreateSync(deploy) - checkFunc := func(usingIPRange, availableIPRange, startIP, lastIP string, count int64) { + checkFunc := func(usingIPRange, availableIPRange, startIP, lastIP string, count int64) bool { if startIP == "" { - return + return true } usingIPEnd := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(startIP), big.NewInt(count-1))) availableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.Ip2BigInt(usingIPEnd), big.NewInt(1))) framework.Logf(`subnet status usingIPRange %q expect "%s-%s"`, usingIPRange, startIP, usingIPEnd) - framework.ExpectEqual(usingIPRange, fmt.Sprintf("%s-%s", startIP, usingIPEnd)) + if usingIPRange != fmt.Sprintf("%s-%s", startIP, usingIPEnd) { + return false + } framework.Logf(`subnet status availableIPRange %q expect "%s-%s"`, availableIPRange, availableIPStart, lastIP) - framework.ExpectEqual(availableIPRange, fmt.Sprintf("%s-%s", availableIPStart, lastIP)) + return availableIPRange == fmt.Sprintf("%s-%s", availableIPStart, lastIP) } ginkgo.By("Checking subnet status") - subnet = subnetClient.Get(subnetName) - checkFunc(subnet.Status.V4UsingIPRange, subnet.Status.V4AvailableIPRange, startIPv4, lastIPv4, 
replicas) - checkFunc(subnet.Status.V6UsingIPRange, subnet.Status.V6AvailableIPRange, startIPv6, lastIPv6, replicas) + framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { + subnet = subnetClient.Get(subnetName) + if !checkFunc(subnet.Status.V4UsingIPRange, subnet.Status.V4AvailableIPRange, startIPv4, lastIPv4, replicas) { + return false, nil + } + return checkFunc(subnet.Status.V6UsingIPRange, subnet.Status.V6AvailableIPRange, startIPv6, lastIPv6, replicas), nil + }, "") ginkgo.By("Restarting deployment " + deployName) _ = deployClient.RestartSync(deploy) diff --git a/test/e2e/kube-ovn/underlay/underlay.go b/test/e2e/kube-ovn/underlay/underlay.go index a4c4299b2ba..05829b56733 100644 --- a/test/e2e/kube-ovn/underlay/underlay.go +++ b/test/e2e/kube-ovn/underlay/underlay.go @@ -3,7 +3,6 @@ package underlay import ( "context" "fmt" - "github.com/kubeovn/kube-ovn/pkg/ipam" "net" "os/exec" "strconv" @@ -19,6 +18,7 @@ import ( "github.com/onsi/ginkgo/v2" apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" + "github.com/kubeovn/kube-ovn/pkg/ipam" "github.com/kubeovn/kube-ovn/pkg/util" "github.com/kubeovn/kube-ovn/test/e2e/framework" "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" @@ -627,16 +627,16 @@ var _ = framework.SerialDescribe("[group:underlay]", func() { for index := 0; index < 2; index++ { getAvailableIPs := func(subnet *apiv1.Subnet) string { var availIPs []string - v4Cidr, v6Cidr := util.SplitStringIP(subnet.Spec.CIDRBlock) if v4Cidr != "" { startIP := strings.Split(v4Cidr, "/")[0] - availIPs = append(availIPs, ipam.NewIP(startIP).Add(100+int64(index)).String()) + ip, _ := ipam.NewIP(startIP) + availIPs = append(availIPs, ip.Add(100+int64(index)).String()) } - if v6Cidr != "" { startIP := strings.Split(v6Cidr, "/")[0] - availIPs = append(availIPs, ipam.NewIP(startIP).Add(100+int64(index)).String()) + ip, _ := ipam.NewIP(startIP) + availIPs = append(availIPs, ip.Add(100+int64(index)).String()) } return 
strings.Join(availIPs, ",") } diff --git a/test/unittest/ipam/ipam.go b/test/unittest/ipam/ipam.go index 2314c2c68ef..23ce7d14e62 100644 --- a/test/unittest/ipam/ipam.go +++ b/test/unittest/ipam/ipam.go @@ -64,12 +64,12 @@ var _ = Describe("[IPAM]", func() { pod1 := "pod1.ns" pod1Nic1 := "pod1nic1.ns" - freeIp1 := im.Subnets[subnetName].V4FreeIPList.At(0).Start().String() + freeIp1 := im.Subnets[subnetName].V4Free.At(0).Start().String() ip, _, _, err := im.GetStaticAddress(pod1, pod1Nic1, freeIp1, nil, subnetName, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp1)) - ip, _, _, err = im.GetRandomAddress(pod1, pod1Nic1, nil, subnetName, nil, true) + ip, _, _, err = im.GetRandomAddress(pod1, pod1Nic1, nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp1)) @@ -78,13 +78,14 @@ var _ = Describe("[IPAM]", func() { pod2Nic1 := "pod2Nic1.ns" pod2Nic2 := "pod2Nic2.ns" - freeIp2 := im.Subnets[subnetName].V4FreeIPList.At(0).Start().String() - ip, _, _, err = im.GetRandomAddress(pod2, pod2Nic1, nil, subnetName, nil, true) + freeIp2 := im.Subnets[subnetName].V4Free.At(0).Start().String() + ip, _, _, err = im.GetRandomAddress(pod2, pod2Nic1, nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp2)) - freeIp3 := im.Subnets[subnetName].V4FreeIPList.At(0).Start().String() - ip, _, _, err = im.GetRandomAddress(pod2, pod2Nic2, nil, subnetName, nil, true) + freeIp3 := im.Subnets[subnetName].V4Free.At(0).Start().String() + ip, _, _, err = im.GetRandomAddress(pod2, pod2Nic2, nil, subnetName, "", nil, true) + Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp3)) @@ -112,12 +113,18 @@ var _ = Describe("[IPAM]", func() { By("release pod with multiple nics") im.ReleaseAddressByPod(pod2) - Expect(im.Subnets[subnetName].V4ReleasedIPList.Contains(ipam.NewIP(freeIp2))).Should(BeTrue()) - 
Expect(im.Subnets[subnetName].V4ReleasedIPList.Contains(ipam.NewIP(freeIp3))).Should(BeTrue()) + ip2, err := ipam.NewIP(freeIp2) + Expect(err).ShouldNot(HaveOccurred()) + ip3, err := ipam.NewIP(freeIp3) + Expect(err).ShouldNot(HaveOccurred()) + Expect(im.Subnets[subnetName].IPPools[""].V4Released.Contains(ip2)).Should(BeTrue()) + Expect(im.Subnets[subnetName].IPPools[""].V4Released.Contains(ip3)).Should(BeTrue()) By("release pod with single nic") im.ReleaseAddressByPod(pod1) - Expect(im.Subnets[subnetName].V4ReleasedIPList.Contains(ipam.NewIP(freeIp1))).To(BeTrue()) + ip1, err := ipam.NewIP(freeIp1) + Expect(err).ShouldNot(HaveOccurred()) + Expect(im.Subnets[subnetName].IPPools[""].V4Released.Contains(ip1)).To(BeTrue()) By("create new pod with released ips") pod4 := "pod4.ns" @@ -130,7 +137,7 @@ var _ = Describe("[IPAM]", func() { pod5 := "pod5.ns" pod5Nic1 := "pod5Nic1.ns" - _, _, _, err = im.GetRandomAddress(pod5, pod5Nic1, nil, "invalid_subnet", nil, true) + _, _, _, err = im.GetRandomAddress(pod5, pod5Nic1, nil, "invalid_subnet", "", nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) }) @@ -141,7 +148,7 @@ var _ = Describe("[IPAM]", func() { err = im.AddOrUpdateSubnet(subnetName, "10.17.0.0/16", v4Gw, []string{"10.17.0.1"}) Expect(err).ShouldNot(HaveOccurred()) - ip, _, _, err := im.GetRandomAddress("pod5.ns", "pod5.ns", nil, subnetName, nil, true) + ip, _, _, err := im.GetRandomAddress("pod5.ns", "pod5.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("10.17.0.2")) @@ -156,17 +163,17 @@ var _ = Describe("[IPAM]", func() { err := im.AddOrUpdateSubnet(subnetName, "10.16.0.0/30", v4Gw, nil) Expect(err).ShouldNot(HaveOccurred()) - ip, _, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ip, _, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("10.16.0.1")) 
im.ReleaseAddressByPod("pod1.ns") - ip, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ip, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("10.16.0.2")) im.ReleaseAddressByPod("pod1.ns") - ip, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ip, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("10.16.0.1")) }) @@ -176,7 +183,7 @@ var _ = Describe("[IPAM]", func() { err := im.AddOrUpdateSubnet(subnetName, "10.16.0.0/30", v4Gw, nil) Expect(err).ShouldNot(HaveOccurred()) - ip, _, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ip, _, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("10.16.0.1")) @@ -184,7 +191,7 @@ var _ = Describe("[IPAM]", func() { err = im.AddOrUpdateSubnet(subnetName, "10.16.0.0/30", v4Gw, []string{"10.16.0.1..10.16.0.2"}) Expect(err).ShouldNot(HaveOccurred()) - _, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) }) }) @@ -212,13 +219,13 @@ var _ = Describe("[IPAM]", func() { pod1 := "pod1.ns" pod1Nic1 := "pod1nic1.ns" - freeIp1 := im.Subnets[subnetName].V6FreeIPList.At(0).Start().String() + freeIp1 := im.Subnets[subnetName].V6Free.At(0).Start().String() _, ip, _, err := im.GetStaticAddress(pod1, pod1Nic1, freeIp1, nil, subnetName, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp1)) - _, ip, _, err = im.GetRandomAddress(pod1, pod1Nic1, nil, subnetName, nil, true) + _, ip, _, err = im.GetRandomAddress(pod1, pod1Nic1, nil, subnetName, "", nil, true) 
Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp1)) @@ -227,13 +234,13 @@ var _ = Describe("[IPAM]", func() { pod2Nic1 := "pod2Nic1.ns" pod2Nic2 := "pod2Nic2.ns" - freeIp2 := im.Subnets[subnetName].V6FreeIPList.At(0).Start().String() - _, ip, _, err = im.GetRandomAddress(pod2, pod2Nic1, nil, subnetName, nil, true) + freeIp2 := im.Subnets[subnetName].V6Free.At(0).Start().String() + _, ip, _, err = im.GetRandomAddress(pod2, pod2Nic1, nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp2)) - freeIp3 := im.Subnets[subnetName].V6FreeIPList.At(0).Start().String() - _, ip, _, err = im.GetRandomAddress(pod2, pod2Nic2, nil, subnetName, nil, true) + freeIp3 := im.Subnets[subnetName].V6Free.At(0).Start().String() + _, ip, _, err = im.GetRandomAddress(pod2, pod2Nic2, nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal(freeIp3)) @@ -261,12 +268,18 @@ var _ = Describe("[IPAM]", func() { By("release pod with multiple nics") im.ReleaseAddressByPod(pod2) - Expect(im.Subnets[subnetName].V6ReleasedIPList.Contains(ipam.NewIP(freeIp2))).Should(BeTrue()) - Expect(im.Subnets[subnetName].V6ReleasedIPList.Contains(ipam.NewIP(freeIp3))).Should(BeTrue()) + ip2, err := ipam.NewIP(freeIp2) + Expect(err).ShouldNot(HaveOccurred()) + ip3, err := ipam.NewIP(freeIp3) + Expect(err).ShouldNot(HaveOccurred()) + Expect(im.Subnets[subnetName].IPPools[""].V6Released.Contains(ip2)).Should(BeTrue()) + Expect(im.Subnets[subnetName].IPPools[""].V6Released.Contains(ip3)).Should(BeTrue()) By("release pod with single nic") im.ReleaseAddressByPod(pod1) - Expect(im.Subnets[subnetName].V6ReleasedIPList.Contains(ipam.NewIP(freeIp1))).Should(BeTrue()) + ip1, err := ipam.NewIP(freeIp1) + Expect(err).ShouldNot(HaveOccurred()) + Expect(im.Subnets[subnetName].IPPools[""].V6Released.Contains(ip1)).Should(BeTrue()) By("create new pod with released ips") pod4 := "pod4.ns" @@ -279,7 +292,7 @@ var _ = Describe("[IPAM]", func() { 
pod5 := "pod5.ns" pod5Nic1 := "pod5Nic1.ns" - _, _, _, err = im.GetRandomAddress(pod5, pod5Nic1, nil, "invalid_subnet", nil, true) + _, _, _, err = im.GetRandomAddress(pod5, pod5Nic1, nil, "invalid_subnet", "", nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) }) @@ -290,7 +303,7 @@ var _ = Describe("[IPAM]", func() { err = im.AddOrUpdateSubnet(subnetName, "fe00::/112", v6Gw, []string{"fe00::1"}) Expect(err).ShouldNot(HaveOccurred()) - _, ip, _, err := im.GetRandomAddress("pod5.ns", "pod5.ns", nil, subnetName, nil, true) + _, ip, _, err := im.GetRandomAddress("pod5.ns", "pod5.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("fe00::2")) @@ -305,17 +318,17 @@ var _ = Describe("[IPAM]", func() { err := im.AddOrUpdateSubnet(subnetName, "fd00::/126", v6Gw, nil) Expect(err).ShouldNot(HaveOccurred()) - _, ip, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, ip, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("fd00::1")) im.ReleaseAddressByPod("pod1.ns") - _, ip, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, ip, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("fd00::2")) im.ReleaseAddressByPod("pod1.ns") - _, ip, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, ip, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("fd00::1")) }) @@ -325,7 +338,7 @@ var _ = Describe("[IPAM]", func() { err := im.AddOrUpdateSubnet(subnetName, "fd00::/126", v6Gw, nil) Expect(err).ShouldNot(HaveOccurred()) - _, ip, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, ip, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", 
nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip).To(Equal("fd00::1")) @@ -333,7 +346,7 @@ var _ = Describe("[IPAM]", func() { err = im.AddOrUpdateSubnet(subnetName, "fd00::/126", v6Gw, []string{"fd00::1..fd00::2"}) Expect(err).ShouldNot(HaveOccurred()) - _, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) }) }) @@ -361,15 +374,15 @@ var _ = Describe("[IPAM]", func() { pod1 := "pod1.ns" pod1Nic1 := "pod1nic1.ns" - freeIp41 := im.Subnets[subnetName].V4FreeIPList.At(0).Start().String() - freeIp61 := im.Subnets[subnetName].V6FreeIPList.At(0).Start().String() + freeIp41 := im.Subnets[subnetName].V4Free.At(0).Start().String() + freeIp61 := im.Subnets[subnetName].V6Free.At(0).Start().String() dualIp := fmt.Sprintf("%s,%s", freeIp41, freeIp61) ip4, ip6, _, err := im.GetStaticAddress(pod1, pod1Nic1, dualIp, nil, subnetName, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip4).To(Equal(freeIp41)) Expect(ip6).To(Equal(freeIp61)) - ip4, ip6, _, err = im.GetRandomAddress(pod1, pod1Nic1, nil, subnetName, nil, true) + ip4, ip6, _, err = im.GetRandomAddress(pod1, pod1Nic1, nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip4).To(Equal(freeIp41)) Expect(ip6).To(Equal(freeIp61)) @@ -379,16 +392,16 @@ var _ = Describe("[IPAM]", func() { pod2Nic1 := "pod2Nic1.ns" pod2Nic2 := "pod2Nic2.ns" - freeIp42 := im.Subnets[subnetName].V4FreeIPList.At(0).Start().String() - freeIp62 := im.Subnets[subnetName].V6FreeIPList.At(0).Start().String() - ip4, ip6, _, err = im.GetRandomAddress(pod2, pod2Nic1, nil, subnetName, nil, true) + freeIp42 := im.Subnets[subnetName].V4Free.At(0).Start().String() + freeIp62 := im.Subnets[subnetName].V6Free.At(0).Start().String() + ip4, ip6, _, err = im.GetRandomAddress(pod2, pod2Nic1, nil, subnetName, "", nil, true) 
Expect(err).ShouldNot(HaveOccurred()) Expect(ip4).To(Equal(freeIp42)) Expect(ip6).To(Equal(freeIp62)) - freeIp43 := im.Subnets[subnetName].V4FreeIPList.At(0).Start().String() - freeIp63 := im.Subnets[subnetName].V6FreeIPList.At(0).Start().String() - ip4, ip6, _, err = im.GetRandomAddress(pod2, pod2Nic2, nil, subnetName, nil, true) + freeIp43 := im.Subnets[subnetName].V4Free.At(0).Start().String() + freeIp63 := im.Subnets[subnetName].V6Free.At(0).Start().String() + ip4, ip6, _, err = im.GetRandomAddress(pod2, pod2Nic2, nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip4).To(Equal(freeIp43)) Expect(ip6).To(Equal(freeIp63)) @@ -429,15 +442,27 @@ var _ = Describe("[IPAM]", func() { By("release pod with multiple nics") im.ReleaseAddressByPod(pod2) - Expect(im.Subnets[subnetName].V4ReleasedIPList.Contains(ipam.NewIP(freeIp42))).Should(BeTrue()) - Expect(im.Subnets[subnetName].V4ReleasedIPList.Contains(ipam.NewIP(freeIp43))).Should(BeTrue()) - Expect(im.Subnets[subnetName].V6ReleasedIPList.Contains(ipam.NewIP(freeIp62))).Should(BeTrue()) - Expect(im.Subnets[subnetName].V6ReleasedIPList.Contains(ipam.NewIP(freeIp63))).Should(BeTrue()) + ip42, err := ipam.NewIP(freeIp42) + Expect(err).ShouldNot(HaveOccurred()) + ip43, err := ipam.NewIP(freeIp43) + Expect(err).ShouldNot(HaveOccurred()) + ip62, err := ipam.NewIP(freeIp62) + Expect(err).ShouldNot(HaveOccurred()) + ip63, err := ipam.NewIP(freeIp63) + Expect(err).ShouldNot(HaveOccurred()) + Expect(im.Subnets[subnetName].IPPools[""].V4Released.Contains(ip42)).Should(BeTrue()) + Expect(im.Subnets[subnetName].IPPools[""].V4Released.Contains(ip43)).Should(BeTrue()) + Expect(im.Subnets[subnetName].IPPools[""].V6Released.Contains(ip62)).Should(BeTrue()) + Expect(im.Subnets[subnetName].IPPools[""].V6Released.Contains(ip63)).Should(BeTrue()) By("release pod with single nic") im.ReleaseAddressByPod(pod1) - Expect(im.Subnets[subnetName].V4ReleasedIPList.Contains(ipam.NewIP(freeIp41))).Should(BeTrue()) - 
Expect(im.Subnets[subnetName].V6ReleasedIPList.Contains(ipam.NewIP(freeIp61))).Should(BeTrue()) + ip41, err := ipam.NewIP(freeIp41) + Expect(err).ShouldNot(HaveOccurred()) + ip61, err := ipam.NewIP(freeIp61) + Expect(err).ShouldNot(HaveOccurred()) + Expect(im.Subnets[subnetName].IPPools[""].V4Released.Contains(ip41)).Should(BeTrue()) + Expect(im.Subnets[subnetName].IPPools[""].V6Released.Contains(ip61)).Should(BeTrue()) By("create new pod with released ips") pod4 := "pod4.ns" @@ -453,7 +478,7 @@ var _ = Describe("[IPAM]", func() { pod5 := "pod5.ns" pod5Nic1 := "pod5Nic1.ns" - _, _, _, err = im.GetRandomAddress(pod5, pod5Nic1, nil, "invalid_subnet", nil, true) + _, _, _, err = im.GetRandomAddress(pod5, pod5Nic1, nil, "invalid_subnet", "", nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) }) @@ -465,7 +490,7 @@ var _ = Describe("[IPAM]", func() { err = im.AddOrUpdateSubnet(subnetName, "10.17.0.2/16,fe00::/112", dualGw, []string{"10.17.0.1", "fe00::1"}) Expect(err).ShouldNot(HaveOccurred()) - ipv4, ipv6, _, err := im.GetRandomAddress("pod5.ns", "pod5.ns", nil, subnetName, nil, true) + ipv4, ipv6, _, err := im.GetRandomAddress("pod5.ns", "pod5.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4).To(Equal("10.17.0.2")) Expect(ipv6).To(Equal("fe00::2")) @@ -476,19 +501,19 @@ var _ = Describe("[IPAM]", func() { err := im.AddOrUpdateSubnet(subnetName, "10.16.0.2/30,fd00::/126", dualGw, nil) Expect(err).ShouldNot(HaveOccurred()) - ipv4, ipv6, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ipv4, ipv6, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4).To(Equal("10.16.0.1")) Expect(ipv6).To(Equal("fd00::1")) im.ReleaseAddressByPod("pod1.ns") - ipv4, ipv6, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ipv4, ipv6, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, 
subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4).To(Equal("10.16.0.2")) Expect(ipv6).To(Equal("fd00::2")) im.ReleaseAddressByPod("pod1.ns") - ipv4, ipv6, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ipv4, ipv6, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4).To(Equal("10.16.0.1")) Expect(ipv6).To(Equal("fd00::1")) @@ -499,7 +524,7 @@ var _ = Describe("[IPAM]", func() { err := im.AddOrUpdateSubnet(subnetName, "10.16.0.2/30,fd00::/126", dualGw, nil) Expect(err).ShouldNot(HaveOccurred()) - ipv4, ipv6, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + ipv4, ipv6, _, err := im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4).To(Equal("10.16.0.1")) Expect(ipv6).To(Equal("fd00::1")) @@ -508,7 +533,7 @@ var _ = Describe("[IPAM]", func() { err = im.AddOrUpdateSubnet(subnetName, "10.16.0.2/30,fd00::/126", dualGw, []string{"10.16.0.1..10.16.0.2", "fd00::1..fd00::2"}) Expect(err).ShouldNot(HaveOccurred()) - _, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, nil, true) + _, _, _, err = im.GetRandomAddress("pod1.ns", "pod1.ns", nil, subnetName, "", nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) }) }) @@ -516,8 +541,8 @@ var _ = Describe("[IPAM]", func() { Describe("[IP]", func() { It("IPv4 operation", func() { - ip1 := ipam.NewIP("10.0.0.16") - ip2 := ipam.NewIP("10.0.0.17") + ip1, _ := ipam.NewIP("10.0.0.16") + ip2, _ := ipam.NewIP("10.0.0.17") Expect(ip1.Equal(ip1)).To(BeTrue()) Expect(ip1.GreaterThan(ip1)).To(BeFalse()) @@ -532,17 +557,19 @@ var _ = Describe("[IPAM]", func() { Expect(ip1.Sub(-1)).To(Equal(ip2)) Expect(ip2.Sub(1)).To(Equal(ip1)) - ipr := ipam.NewIPRange(ipam.NewIP("10.0.0.1"), ipam.NewIP("10.0.0.254")) + first, _ := ipam.NewIP("10.0.0.1") + last, _ := 
ipam.NewIP("10.0.0.254") + ipr := ipam.NewIPRange(first, last) Expect(ipr.Contains(ip1)).To(BeTrue()) Expect(ipr.Contains(ip2)).To(BeTrue()) - iprList := ipam.NewIPRangeListFrom(fmt.Sprintf("%s..%s", ipr.Start(), ipr.End())) + iprList, _ := ipam.NewIPRangeListFrom(fmt.Sprintf("%s..%s", ipr.Start(), ipr.End())) Expect(iprList.Contains(ip1)).To(BeTrue()) }) It("IPv6 operation", func() { - ip1 := ipam.NewIP("fd00::16") - ip2 := ipam.NewIP("fd00::17") + ip1, _ := ipam.NewIP("fd00::16") + ip2, _ := ipam.NewIP("fd00::17") Expect(ip1.Equal(ip1)).To(BeTrue()) Expect(ip1.GreaterThan(ip1)).To(BeFalse()) @@ -557,11 +584,13 @@ var _ = Describe("[IPAM]", func() { Expect(ip1.Sub(-1)).To(Equal(ip2)) Expect(ip2.Sub(1)).To(Equal(ip1)) - ipr := ipam.NewIPRange(ipam.NewIP("fd00::01"), ipam.NewIP("fd00::ff")) + first, _ := ipam.NewIP("fd00::01") + last, _ := ipam.NewIP("fd00::ff") + ipr := ipam.NewIPRange(first, last) Expect(ipr.Contains(ip1)).To(BeTrue()) Expect(ipr.Contains(ip2)).To(BeTrue()) - iprList := ipam.NewIPRangeListFrom(fmt.Sprintf("%s..%s", ipr.Start(), ipr.End())) + iprList, _ := ipam.NewIPRangeListFrom(fmt.Sprintf("%s..%s", ipr.Start(), ipr.End())) Expect(iprList.Contains(ip1)).To(BeTrue()) }) }) @@ -572,18 +601,21 @@ var _ = Describe("[IPAM]", func() { subnet, err := ipam.NewSubnet(subnetName, ipv4CIDR, ipv4ExcludeIPs) Expect(err).ShouldNot(HaveOccurred()) Expect(subnet.Name).To(Equal(subnetName)) - Expect(subnet.V4ReservedIPList.Len()).To(Equal(len(ipv4ExcludeIPs) - 2)) - Expect(subnet.V4FreeIPList.Len()).To(Equal(3)) - Expect(subnet.V4FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.2..10.16.0.3", - "10.16.0.5..10.16.0.9", - "10.16.0.24..10.16.255.254", - ), - )) + Expect(subnet.IPPools[""].V4Reserved.Len()).To(Equal(len(ipv4ExcludeIPs) - 2)) + Expect(subnet.V4Free.Len()).To(Equal(3)) + expected, _ := ipam.NewIPRangeListFrom( + "10.16.0.2..10.16.0.3", + "10.16.0.5..10.16.0.9", + "10.16.0.24..10.16.255.254", + ) + Expect(subnet.V4Free).To(Equal(expected)) }) 
It("static allocation", func() { + ip2, _ := ipam.NewIP("10.16.0.2") + ip3, _ := ipam.NewIP("10.16.0.3") + ip20, _ := ipam.NewIP("10.16.0.20") + subnet, err := ipam.NewSubnet(subnetName, ipv4CIDR, ipv4ExcludeIPs) Expect(err).ShouldNot(HaveOccurred()) @@ -591,48 +623,43 @@ var _ = Describe("[IPAM]", func() { pod1Nic1 := "pod1Nic1.ns" pod1Nic1mac := util.GenerateMac() - _, _, err = subnet.GetStaticAddress(pod1, pod1Nic1, ipam.NewIP("10.16.0.2"), &pod1Nic1mac, false, true) + _, _, err = subnet.GetStaticAddress(pod1, pod1Nic1, ip2, &pod1Nic1mac, false, true) Expect(err).ShouldNot(HaveOccurred()) pod2 := "pod2.ns" pod2Nic1 := "pod2Nic1" - _, _, err = subnet.GetStaticAddress(pod2, pod2Nic1, ipam.NewIP("10.16.0.3"), nil, false, true) + _, _, err = subnet.GetStaticAddress(pod2, pod2Nic1, ip3, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) pod2Nic2 := "pod2Nic2" - _, _, err = subnet.GetStaticAddress(pod2, pod2Nic2, ipam.NewIP("10.16.0.20"), nil, false, true) + _, _, err = subnet.GetStaticAddress(pod2, pod2Nic2, ip20, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - Expect(subnet.V4FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.5..10.16.0.9", - "10.16.0.24..10.16.255.254", - ), - )) + expected, _ := ipam.NewIPRangeListFrom( + "10.16.0.5..10.16.0.9", + "10.16.0.24..10.16.255.254", + ) + Expect(subnet.V4Free).To(Equal(expected)) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.2", pod1)) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.3", pod2)) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.20", pod2)) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue(pod1Nic1, ipam.NewIP("10.16.0.2"))) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue(pod2Nic1, ipam.NewIP("10.16.0.3"))) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue(pod2Nic2, ipam.NewIP("10.16.0.20"))) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue(pod1Nic1, ip2)) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue(pod2Nic1, ip3)) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue(pod2Nic2, 
ip20)) Expect(subnet.NicToMac).To(HaveKeyWithValue(pod1Nic1, pod1Nic1mac)) Expect(subnet.MacToPod).To(HaveKeyWithValue(pod1Nic1mac, pod1)) - _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ipam.NewIP("10.16.0.3"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ip3, nil, false, true) Expect(err).Should(MatchError(ipam.ErrConflict)) - _, _, err = subnet.GetStaticAddress("pod5.ns", "pod5.ns", ipam.NewIP("19.16.0.3"), nil, false, true) - Expect(err).Should(MatchError(ipam.ErrOutOfRange)) - _, _, err = subnet.GetStaticAddress("pod6.ns", "pod5.ns", ipam.NewIP("10.16.0.121"), &pod1Nic1mac, false, true) + _, _, err = subnet.GetStaticAddress("pod5.ns", "pod5.ns", ip3, nil, false, true) + Expect(err).Should(MatchError(ipam.ErrConflict)) + ip, _ := ipam.NewIP("10.16.0.121") + _, _, err = subnet.GetStaticAddress("pod6.ns", "pod5.ns", ip, &pod1Nic1mac, false, true) Expect(err).Should(MatchError(ipam.ErrConflict)) subnet.ReleaseAddress(pod1) subnet.ReleaseAddress(pod2) - Expect(subnet.V4FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.5..10.16.0.9", - "10.16.0.24..10.16.255.254", - ), - )) + Expect(subnet.V4Free).To(Equal(expected)) Expect(subnet.V4NicToIP).To(BeEmpty()) Expect(subnet.V4IPToPod).To(BeEmpty()) @@ -642,34 +669,33 @@ var _ = Describe("[IPAM]", func() { subnet, err := ipam.NewSubnet(subnetName, "10.16.0.0/30", nil) Expect(err).ShouldNot(HaveOccurred()) - ip1, _, _, err := subnet.GetRandomAddress("pod1.ns", "pod1.ns", nil, nil, true) + ip1, _, _, err := subnet.GetRandomAddress("", "pod1.ns", "pod1.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip1.String()).To(Equal("10.16.0.1")) - ip1, _, _, err = subnet.GetRandomAddress("pod1.ns", "pod1.ns", nil, nil, true) + ip1, _, _, err = subnet.GetRandomAddress("", "pod1.ns", "pod1.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip1.String()).To(Equal("10.16.0.1")) - ip2, _, _, err := subnet.GetRandomAddress("pod2.ns", "pod2.ns", nil, nil, 
true) + ip2, _, _, err := subnet.GetRandomAddress("", "pod2.ns", "pod2.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip2.String()).To(Equal("10.16.0.2")) - _, _, _, err = subnet.GetRandomAddress("pod3.ns", "pod3.ns", nil, nil, true) + _, _, _, err = subnet.GetRandomAddress("", "pod3.ns", "pod3.ns", nil, nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) - Expect(subnet.V4FreeIPList.Len()).To(Equal(0)) + Expect(subnet.V4Free.Len()).To(Equal(0)) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.1", "pod1.ns")) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.2", "pod2.ns")) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod1.ns", ipam.NewIP("10.16.0.1"))) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod2.ns", ipam.NewIP("10.16.0.2"))) + ip01, _ := ipam.NewIP("10.16.0.1") + ip02, _ := ipam.NewIP("10.16.0.2") + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod1.ns", ip01)) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod2.ns", ip02)) subnet.ReleaseAddress("pod1.ns") subnet.ReleaseAddress("pod2.ns") - Expect(subnet.V4FreeIPList.Len()).To(Equal(0)) - Expect(subnet.V4ReleasedIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.1..10.16.0.2", - ), - )) + Expect(subnet.V4Free.Len()).To(Equal(0)) + expected, _ := ipam.NewIPRangeListFrom("10.16.0.1..10.16.0.2") + Expect(subnet.IPPools[""].V4Released).To(Equal(expected)) Expect(subnet.V4IPToPod).To(BeEmpty()) Expect(subnet.V4NicToIP).To(BeEmpty()) }) @@ -680,18 +706,22 @@ var _ = Describe("[IPAM]", func() { subnet, err := ipam.NewSubnet(subnetName, ipv6CIDR, ipv6ExcludeIPs) Expect(err).ShouldNot(HaveOccurred()) Expect(subnet.Name).To(Equal(subnetName)) - Expect(subnet.V6ReservedIPList.Len()).To(Equal(len(ipv6ExcludeIPs) - 2)) - Expect(subnet.V6FreeIPList.Len()).To(Equal(3)) - Expect(subnet.V6FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::2..fd00::3", - "fd00::5..fd00::9", - "fd00::18..fd00::fffe", - ), - )) + 
Expect(subnet.IPPools[""].V6Reserved.Len()).To(Equal(len(ipv6ExcludeIPs) - 2)) + Expect(subnet.V6Free.Len()).To(Equal(3)) + expected, _ := ipam.NewIPRangeListFrom( + "fd00::2..fd00::3", + "fd00::5..fd00::9", + "fd00::18..fd00::fffe", + ) + Expect(subnet.V6Free).To(Equal(expected)) }) It("static allocation", func() { + ip2, _ := ipam.NewIP("fd00::2") + ip3, _ := ipam.NewIP("fd00::3") + ip14, _ := ipam.NewIP("fd00::14") + ipf9, _ := ipam.NewIP("fd00::f9") + subnet, err := ipam.NewSubnet(subnetName, ipv6CIDR, ipv6ExcludeIPs) Expect(err).ShouldNot(HaveOccurred()) @@ -699,86 +729,80 @@ var _ = Describe("[IPAM]", func() { pod1Nic1 := "pod1Nic1.ns" pod1Nic1mac := util.GenerateMac() - _, _, err = subnet.GetStaticAddress(pod1, pod1Nic1, ipam.NewIP("fd00::2"), &pod1Nic1mac, false, true) + _, _, err = subnet.GetStaticAddress(pod1, pod1Nic1, ip2, &pod1Nic1mac, false, true) Expect(err).ShouldNot(HaveOccurred()) pod2 := "pod2.ns" pod2Nic1 := "pod2Nic1.ns" - _, _, err = subnet.GetStaticAddress(pod2, pod2Nic1, ipam.NewIP("fd00::3"), nil, false, true) + _, _, err = subnet.GetStaticAddress(pod2, pod2Nic1, ip3, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) pod2Nic2 := "pod2Nic2.ns" - _, _, err = subnet.GetStaticAddress(pod2, pod2Nic2, ipam.NewIP("fd00::14"), nil, false, true) + _, _, err = subnet.GetStaticAddress(pod2, pod2Nic2, ip14, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - Expect(subnet.V6FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::5..fd00::9", - "fd00::18..fd00::fffe", - ), - )) + expected, _ := ipam.NewIPRangeListFrom( + "fd00::5..fd00::9", + "fd00::18..fd00::fffe", + ) + Expect(subnet.V6Free).To(Equal(expected)) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::2", pod1)) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::3", pod2)) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::14", pod2)) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue(pod1Nic1, ipam.NewIP("fd00::2"))) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue(pod2Nic1, 
ipam.NewIP("fd00::3"))) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue(pod2Nic2, ipam.NewIP("fd00::14"))) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue(pod1Nic1, ip2)) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue(pod2Nic1, ip3)) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue(pod2Nic2, ip14)) Expect(subnet.NicToMac).To(HaveKeyWithValue(pod1Nic1, pod1Nic1mac)) Expect(subnet.MacToPod).To(HaveKeyWithValue(pod1Nic1mac, pod1)) - _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ipam.NewIP("fd00::3"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ip3, nil, false, true) + Expect(err).Should(MatchError(ipam.ErrConflict)) + _, _, err = subnet.GetStaticAddress("pod5.ns", "pod5.ns", ip3, nil, false, true) Expect(err).Should(MatchError(ipam.ErrConflict)) - _, _, err = subnet.GetStaticAddress("pod5.ns", "pod5.ns", ipam.NewIP("fe00::3"), nil, false, true) - Expect(err).Should(MatchError(ipam.ErrOutOfRange)) - _, _, err = subnet.GetStaticAddress("pod6.ns", "pod5.ns", ipam.NewIP("fd00::f9"), &pod1Nic1mac, false, true) + _, _, err = subnet.GetStaticAddress("pod6.ns", "pod5.ns", ipf9, &pod1Nic1mac, false, true) Expect(err).Should(MatchError(ipam.ErrConflict)) subnet.ReleaseAddress(pod1) subnet.ReleaseAddress(pod2) - Expect(subnet.V6FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::5..fd00::9", - "fd00::18..fd00::fffe", - ), - )) + Expect(subnet.V6Free).To(Equal(expected)) Expect(subnet.V6NicToIP).To(BeEmpty()) Expect(subnet.V6IPToPod).To(BeEmpty()) }) It("random allocation", func() { + ip01, _ := ipam.NewIP("fd00::1") + ip02, _ := ipam.NewIP("fd00::2") + subnet, err := ipam.NewSubnet(subnetName, "fd00::/126", nil) Expect(err).ShouldNot(HaveOccurred()) - _, ip1, _, err := subnet.GetRandomAddress("pod1.ns", "pod1.ns", nil, nil, true) + _, ip1, _, err := subnet.GetRandomAddress("", "pod1.ns", "pod1.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip1.String()).To(Equal("fd00::1")) - _, ip1, _, err = 
subnet.GetRandomAddress("pod1.ns", "pod1.ns", nil, nil, true) + _, ip1, _, err = subnet.GetRandomAddress("", "pod1.ns", "pod1.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip1.String()).To(Equal("fd00::1")) - _, ip2, _, err := subnet.GetRandomAddress("pod2.ns", "pod2.ns", nil, nil, true) + _, ip2, _, err := subnet.GetRandomAddress("", "pod2.ns", "pod2.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ip2.String()).To(Equal("fd00::2")) - _, _, _, err = subnet.GetRandomAddress("pod3.ns", "pod3.ns", nil, nil, true) + _, _, _, err = subnet.GetRandomAddress("", "pod3.ns", "pod3.ns", nil, nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) - Expect(subnet.V6FreeIPList.Len()).To(Equal(0)) + Expect(subnet.V6Free.Len()).To(Equal(0)) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::1", "pod1.ns")) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::2", "pod2.ns")) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod1.ns", ipam.NewIP("fd00::1"))) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod2.ns", ipam.NewIP("fd00::2"))) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod1.ns", ip01)) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod2.ns", ip02)) subnet.ReleaseAddress("pod1.ns") subnet.ReleaseAddress("pod2.ns") - Expect(subnet.V6FreeIPList.Len()).To(Equal(0)) - Expect(subnet.V6ReleasedIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::1..fd00::2", - ), - )) + Expect(subnet.V6Free.Len()).To(Equal(0)) + expected, _ := ipam.NewIPRangeListFrom("fd00::1..fd00::2") + Expect(subnet.IPPools[""].V6Released).To(Equal(expected)) Expect(subnet.V6IPToPod).To(BeEmpty()) Expect(subnet.V6NicToIP).To(BeEmpty()) }) @@ -789,92 +813,85 @@ var _ = Describe("[IPAM]", func() { subnet, err := ipam.NewSubnet(subnetName, dualCIDR, dualExcludeIPs) Expect(err).ShouldNot(HaveOccurred()) Expect(subnet.Name).To(Equal(subnetName)) - Expect(subnet.V4ReservedIPList.Len()).To(Equal(len(ipv4ExcludeIPs) - 2)) - 
Expect(subnet.V4FreeIPList.Len()).To(Equal(3)) - Expect(subnet.V4FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.2..10.16.0.3", - "10.16.0.5..10.16.0.9", - "10.16.0.24..10.16.255.254", - ), - )) - Expect(subnet.V6ReservedIPList.Len()).To(Equal(len(ipv6ExcludeIPs) - 2)) - Expect(subnet.V6FreeIPList.Len()).To(Equal(3)) - Expect(subnet.V6FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::2..fd00::3", - "fd00::5..fd00::9", - "fd00::18..fd00::fffe", - ), - )) + Expect(subnet.V4Reserved.Len()).To(Equal(len(ipv4ExcludeIPs) - 2)) + Expect(subnet.V4Free.Len()).To(Equal(3)) + expectedV4, _ := ipam.NewIPRangeListFrom( + "10.16.0.2..10.16.0.3", + "10.16.0.5..10.16.0.9", + "10.16.0.24..10.16.255.254", + ) + Expect(subnet.V4Free).To(Equal(expectedV4)) + Expect(subnet.IPPools[""].V6Reserved.Len()).To(Equal(len(ipv6ExcludeIPs) - 2)) + Expect(subnet.V6Free.Len()).To(Equal(3)) + expectedV6, _ := ipam.NewIPRangeListFrom( + "fd00::2..fd00::3", + "fd00::5..fd00::9", + "fd00::18..fd00::fffe", + ) + Expect(subnet.V6Free).To(Equal(expectedV6)) }) It("static allocation", func() { + ip2V4, _ := ipam.NewIP("10.16.0.2") + ip3V4, _ := ipam.NewIP("10.16.0.3") + ip20V4, _ := ipam.NewIP("10.16.0.20") + ip2V6, _ := ipam.NewIP("fd00::2") + ip3V6, _ := ipam.NewIP("fd00::3") + ip14V6, _ := ipam.NewIP("fd00::14") + subnet, err := ipam.NewSubnet(subnetName, dualCIDR, dualExcludeIPs) Expect(err).ShouldNot(HaveOccurred()) - _, _, err = subnet.GetStaticAddress("pod1.ns", "pod1.ns", ipam.NewIP("10.16.0.2"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod1.ns", "pod1.ns", ip2V4, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - _, _, err = subnet.GetStaticAddress("pod1.ns", "pod1.ns", ipam.NewIP("fd00::2"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod1.ns", "pod1.ns", ip2V6, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - _, _, err = subnet.GetStaticAddress("pod2.ns", "pod2.ns", ipam.NewIP("10.16.0.3"), nil, false, true) + _, _, err = 
subnet.GetStaticAddress("pod2.ns", "pod2.ns", ip3V4, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - _, _, err = subnet.GetStaticAddress("pod2.ns", "pod2.ns", ipam.NewIP("fd00::3"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod2.ns", "pod2.ns", ip3V6, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - _, _, err = subnet.GetStaticAddress("pod3.ns", "pod3.ns", ipam.NewIP("10.16.0.20"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod3.ns", "pod3.ns", ip20V4, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - _, _, err = subnet.GetStaticAddress("pod3.ns", "pod3.ns", ipam.NewIP("fd00::14"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod3.ns", "pod3.ns", ip14V6, nil, false, true) Expect(err).ShouldNot(HaveOccurred()) - Expect(subnet.V4FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.5..10.16.0.9", - "10.16.0.24..10.16.255.254", - ), - )) - Expect(subnet.V6FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::5..fd00::9", - "fd00::18..fd00::fffe", - ), - )) + expectedV4, _ := ipam.NewIPRangeListFrom( + "10.16.0.5..10.16.0.9", + "10.16.0.24..10.16.255.254", + ) + Expect(subnet.V4Free).To(Equal(expectedV4)) + expectedV6, _ := ipam.NewIPRangeListFrom( + "fd00::5..fd00::9", + "fd00::18..fd00::fffe", + ) + Expect(subnet.V6Free).To(Equal(expectedV6)) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.2", "pod1.ns")) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.3", "pod2.ns")) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.20", "pod3.ns")) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod1.ns", ipam.NewIP("10.16.0.2"))) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod2.ns", ipam.NewIP("10.16.0.3"))) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod3.ns", ipam.NewIP("10.16.0.20"))) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod1.ns", ip2V4)) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod2.ns", ip3V4)) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod3.ns", 
ip20V4)) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::2", "pod1.ns")) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::3", "pod2.ns")) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::14", "pod3.ns")) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod1.ns", ipam.NewIP("fd00::2"))) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod2.ns", ipam.NewIP("fd00::3"))) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod3.ns", ipam.NewIP("fd00::14"))) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod1.ns", ip2V6)) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod2.ns", ip3V6)) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod3.ns", ip14V6)) - _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ipam.NewIP("10.16.0.3"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ip3V4, nil, false, true) + Expect(err).Should(MatchError(ipam.ErrConflict)) + _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ip3V6, nil, false, true) Expect(err).Should(MatchError(ipam.ErrConflict)) - _, _, err = subnet.GetStaticAddress("pod4.ns", "pod4.ns", ipam.NewIP("fd00::3"), nil, false, true) + _, _, err = subnet.GetStaticAddress("pod5.ns", "pod5.ns", ip3V4, nil, false, true) + Expect(err).Should(MatchError(ipam.ErrConflict)) + _, _, err = subnet.GetStaticAddress("pod1.ns", "pod5.ns", ip3V6, nil, false, true) Expect(err).Should(MatchError(ipam.ErrConflict)) - _, _, err = subnet.GetStaticAddress("pod5.ns", "pod5.ns", ipam.NewIP("19.16.0.3"), nil, false, true) - Expect(err).Should(MatchError(ipam.ErrOutOfRange)) - _, _, err = subnet.GetStaticAddress("pod1.ns", "pod5.ns", ipam.NewIP("fe00::3"), nil, false, true) - Expect(err).Should(MatchError(ipam.ErrOutOfRange)) subnet.ReleaseAddress("pod1.ns") subnet.ReleaseAddress("pod2.ns") subnet.ReleaseAddress("pod3.ns") - Expect(subnet.V4FreeIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.5..10.16.0.9", - "10.16.0.24..10.16.255.254", - ), - )) - Expect(subnet.V6FreeIPList).To(Equal( - 
ipam.NewIPRangeListFrom( - "fd00::5..fd00::9", - "fd00::18..fd00::fffe", - ), - )) + Expect(subnet.V4Free).To(Equal(expectedV4)) + Expect(subnet.V6Free).To(Equal(expectedV6)) Expect(subnet.V4NicToIP).To(BeEmpty()) Expect(subnet.V4IPToPod).To(BeEmpty()) @@ -883,51 +900,50 @@ var _ = Describe("[IPAM]", func() { }) It("random allocation", func() { + ip1V4, _ := ipam.NewIP("10.16.0.1") + ip2V4, _ := ipam.NewIP("10.16.0.2") + ip1V6, _ := ipam.NewIP("fd00::1") + ip2V6, _ := ipam.NewIP("fd00::2") + subnet, err := ipam.NewSubnet(subnetName, "10.16.0.0/30,fd00::/126", nil) Expect(err).ShouldNot(HaveOccurred()) - ipv4, ipv6, _, err := subnet.GetRandomAddress("pod1.ns", "pod1.ns", nil, nil, true) + ipv4, ipv6, _, err := subnet.GetRandomAddress("", "pod1.ns", "pod1.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4.String()).To(Equal("10.16.0.1")) Expect(ipv6.String()).To(Equal("fd00::1")) - ipv4, ipv6, _, err = subnet.GetRandomAddress("pod1.ns", "pod1.ns", nil, nil, true) + ipv4, ipv6, _, err = subnet.GetRandomAddress("", "pod1.ns", "pod1.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4.String()).To(Equal("10.16.0.1")) Expect(ipv6.String()).To(Equal("fd00::1")) - ipv4, ipv6, _, err = subnet.GetRandomAddress("pod2.ns", "pod2.ns", nil, nil, true) + ipv4, ipv6, _, err = subnet.GetRandomAddress("", "pod2.ns", "pod2.ns", nil, nil, true) Expect(err).ShouldNot(HaveOccurred()) Expect(ipv4.String()).To(Equal("10.16.0.2")) Expect(ipv6.String()).To(Equal("fd00::2")) - _, _, _, err = subnet.GetRandomAddress("pod3.ns", "pod3.ns", nil, nil, true) + _, _, _, err = subnet.GetRandomAddress("", "pod3.ns", "pod3.ns", nil, nil, true) Expect(err).Should(MatchError(ipam.ErrNoAvailable)) - Expect(subnet.V4FreeIPList.Len()).To(Equal(0)) - Expect(subnet.V6FreeIPList.Len()).To(Equal(0)) + Expect(subnet.V4Free.Len()).To(Equal(0)) + Expect(subnet.V6Free.Len()).To(Equal(0)) Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.1", "pod1.ns")) 
Expect(subnet.V4IPToPod).To(HaveKeyWithValue("10.16.0.2", "pod2.ns")) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod1.ns", ipam.NewIP("10.16.0.1"))) - Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod2.ns", ipam.NewIP("10.16.0.2"))) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod1.ns", ip1V4)) + Expect(subnet.V4NicToIP).To(HaveKeyWithValue("pod2.ns", ip2V4)) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::1", "pod1.ns")) Expect(subnet.V6IPToPod).To(HaveKeyWithValue("fd00::2", "pod2.ns")) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod1.ns", ipam.NewIP("fd00::1"))) - Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod2.ns", ipam.NewIP("fd00::2"))) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod1.ns", ip1V6)) + Expect(subnet.V6NicToIP).To(HaveKeyWithValue("pod2.ns", ip2V6)) subnet.ReleaseAddress("pod1.ns") subnet.ReleaseAddress("pod2.ns") - Expect(subnet.V4FreeIPList.Len()).To(Equal(0)) - Expect(subnet.V4ReleasedIPList).To(Equal( - ipam.NewIPRangeListFrom( - "10.16.0.1..10.16.0.2", - ), - )) - Expect(subnet.V6FreeIPList.Len()).To(Equal(0)) - Expect(subnet.V6ReleasedIPList).To(Equal( - ipam.NewIPRangeListFrom( - "fd00::1..fd00::2", - ), - )) + Expect(subnet.V4Free.Len()).To(Equal(0)) + expected, _ := ipam.NewIPRangeListFrom("10.16.0.1..10.16.0.2") + Expect(subnet.IPPools[""].V4Released).To(Equal(expected)) + Expect(subnet.V6Free.Len()).To(Equal(0)) + expected, _ = ipam.NewIPRangeListFrom("fd00::1..fd00::2") + Expect(subnet.IPPools[""].V6Released).To(Equal(expected)) Expect(subnet.V4IPToPod).To(BeEmpty()) Expect(subnet.V4NicToIP).To(BeEmpty()) Expect(subnet.V6IPToPod).To(BeEmpty()) diff --git a/test/unittest/ipam_bench/ipam_test.go b/test/unittest/ipam_bench/ipam_test.go index 40b1150418a..b15329194b5 100644 --- a/test/unittest/ipam_bench/ipam_test.go +++ b/test/unittest/ipam_bench/ipam_test.go @@ -186,7 +186,7 @@ func addSerailAddrCapacity(b *testing.B, im *ipam.IPAM, protocol string) { for n := 0; n < b.N; n++ { podName := fmt.Sprintf("pod%d", n) 
nicName := fmt.Sprintf("nic%d", n) - if _, _, _, err := im.GetRandomAddress(podName, nicName, nil, subnetName, nil, true); err != nil { + if _, _, _, err := im.GetRandomAddress(podName, nicName, nil, subnetName, "", nil, true); err != nil { b.Errorf("ERROR: allocate %s address failed with index %d with err %v ", protocol, n, err) return } @@ -313,7 +313,7 @@ func benchmarkAllocFreeAddrParallel(b *testing.B, podNumber int, protocol string podName := fmt.Sprintf("pod%d_%d", key, n) nicName := fmt.Sprintf("nic%d_%d", key, n) if key%2 == 1 { - if _, _, _, err := im.GetRandomAddress(podName, nicName, nil, subnetName, nil, true); err != nil { + if _, _, _, err := im.GetRandomAddress(podName, nicName, nil, subnetName, "", nil, true); err != nil { b.Errorf("ERROR: allocate %s address failed with index %d err %v ", protocol, n, err) return } diff --git a/yamls/crd.yaml b/yamls/crd.yaml index bb2f362e5ec..ab70b95ec32 100644 --- a/yamls/crd.yaml +++ b/yamls/crd.yaml @@ -2076,6 +2076,122 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + name: ippools.kubeovn.io +spec: + group: kubeovn.io + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: Subnet + type: string + jsonPath: .spec.subnet + - name: Protocol + type: string + jsonPath: .spec.protocol + - name: IPs + type: string + jsonPath: .spec.ips + - name: V4Used + type: number + jsonPath: .status.v4UsingIPs + - name: V4Available + type: number + jsonPath: .status.v4AvailableIPs + - name: V6Used + type: number + jsonPath: .status.v6UsingIPs + - name: V6Available + type: number + jsonPath: .status.v6AvailableIPs + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + subnet: + type: string + x-kubernetes-validations: + - rule: "self == oldSelf" + message: "This field is immutable." 
+ namespaces: + type: array + x-kubernetes-list-type: set + items: + type: string + protocol: + type: string + enum: + - IPv4 + - IPv6 + - Dual + ips: + type: array + minItems: 1 + x-kubernetes-list-type: set + items: + type: string + anyOf: + - format: ipv4 + - format: ipv6 + - format: cidr + - pattern: ^(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.\.(?:(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])$ + - pattern: ^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))\.\.((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|:)))$ + required: + - subnet + - ips + status: + type: object + properties: + v4AvailableIPs: + type: number + v4UsingIPs: + type: number + v6AvailableIPs: + type: number + v6UsingIPs: + type: number + v4AvailableIPRange: + type: string + v4UsingIPRange: + type: string + v6AvailableIPRange: + type: string + v6UsingIPRange: + type: string + conditions: + type: array + items: + type: object + properties: + type: + type: string + status: + type: string + reason: + type: string + message: + type: string + lastUpdateTime: + type: string + lastTransitionTime: + type: string + scope: Cluster + names: + plural: ippools + singular: ippool + kind: IPPool + shortNames: + - ippool +--- +apiVersion: apiextensions.k8s.io/v1 
+kind: CustomResourceDefinition metadata: name: vlans.kubeovn.io spec: diff --git a/yamls/sa.yaml b/yamls/sa.yaml index 6381b572f23..1aafc9409e0 100644 --- a/yamls/sa.yaml +++ b/yamls/sa.yaml @@ -71,6 +71,8 @@ rules: - vpc-nat-gateways/status - subnets - subnets/status + - ippools + - ippools/status - ips - vips - vips/status