Do not set status.ip when proxy protocol is set #5

Merged · 3 commits · Apr 30, 2022
1 change: 1 addition & 0 deletions .env.sample
@@ -0,0 +1 @@
CIVO_API_KEY=
2 changes: 2 additions & 0 deletions .gitignore
@@ -1 +1,3 @@
kubeconfig
.env
./e2e/kubeconfig
33 changes: 33 additions & 0 deletions README.md
@@ -6,6 +6,39 @@ This controller is the Kubernetes cloud controller manager implementation for Ci

How do I run the `civo-cloud-controller-manager` [here!](https://github.com/civo/civo-cloud-controller-manager/blob/master/doc/getting-started.md)

## Load Balancers

The CCM watches for Services of type `LoadBalancer` and provisions an external load balancer within the Civo platform.

Once the load balancer is provisioned, a public DNS entry is added that maps to the ID of the load balancer, e.g. `92f8162c-c23c-4019-b6a0-2c18b8363f50.lb.civo.com`.

Read More: https://www.civo.com/learn/managing-external-load-balancers-on-civo

### Load Balancer Customisation

| Annotation | Description | Example Values |
|------------|-------------|----------------|
| kubernetes.civo.com/firewall-id | If provided, an existing Firewall will be used. | 03093EF6-31E6-48B1-AB1D-152AC3A8C90A |
| kubernetes.civo.com/loadbalancer-enable-proxy-protocol | If set, a proxy protocol header will be sent via the load balancer. <br /><br />NB: This requires proxy protocol support from the service endpoints within the cluster. | send-proxy<br />send-proxy-v2 |
| kubernetes.civo.com/loadbalancer-algorithm | Customise the algorithm the external load balancer uses. | round_robin<br />least_connections |
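
As an illustrative sketch (not part of this PR), these annotations are set on the Service itself. The example below mirrors how the e2e tests in this change build Services with client-go types; the firewall ID, names and selector are placeholder values.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// exampleLoadBalancerService builds a Service of type LoadBalancer carrying
// the Civo customisation annotations described above. All values are
// placeholders, for illustration only.
func exampleLoadBalancerService() *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "echo-pods",
			Namespace: "default",
			Annotations: map[string]string{
				"kubernetes.civo.com/firewall-id":                        "03093EF6-31E6-48B1-AB1D-152AC3A8C90A",
				"kubernetes.civo.com/loadbalancer-enable-proxy-protocol": "send-proxy",
				"kubernetes.civo.com/loadbalancer-algorithm":             "least_connections",
			},
		},
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeLoadBalancer,
			Selector: map[string]string{"app": "mirror-pod"},
			Ports: []corev1.ServicePort{
				{Name: "http", Protocol: corev1.ProtocolTCP, Port: 80, TargetPort: intstr.FromInt(8080)},
			},
		},
	}
}
```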

### Load Balancer Status Annotations

| Annotation | Description | Sample Value |
| ------------------------------------- | ------------------------------------------------------ | ------------------------------------ |
| kubernetes.civo.com/cluster-id | The ID of the cluster the load balancer is assigned to | 05CE1CA2-067F-42F0-9BAA-17A6A800EFBB |
| kubernetes.civo.com/loadbalancer-id | The ID of the Load Balancer within Civo. | 92F8162C-C23C-4019-B6A0-2C18B8363F50 |
| kubernetes.civo.com/loadbalancer-name | The name of the Load Balancer | Lb-test |
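
For illustration only (not part of this PR), the status annotations can be read back from the Service once the load balancer is provisioned; the namespace, Service name and client wiring below are assumptions.

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printCivoLBAnnotations fetches a Service and prints the status annotations
// written back by the CCM. "default" and "echo-pods" are placeholder names.
func printCivoLBAnnotations(ctx context.Context, kclient kubernetes.Interface) error {
	svc, err := kclient.CoreV1().Services("default").Get(ctx, "echo-pods", metav1.GetOptions{})
	if err != nil {
		return err
	}
	for _, key := range []string{
		"kubernetes.civo.com/cluster-id",
		"kubernetes.civo.com/loadbalancer-id",
		"kubernetes.civo.com/loadbalancer-name",
	} {
		fmt.Printf("%s=%s\n", key, svc.Annotations[key])
	}
	return nil
}
```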



### Proxy Protocol

When the proxy protocol annotation (`kubernetes.civo.com/loadbalancer-enable-proxy-protocol`) is set, the IP of the load balancer is not written to the Service status; only the Hostname is. This means that traffic from inside the cluster to the load balancer endpoint is sent via the load balancer itself, which allows services like CertManager to work correctly.

This option is currently a workaround for https://github.com/kubernetes/ingress-nginx/issues/3996 and should be removed or refactored once Kubernetes [KEP-1860] is implemented.
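
As a minimal sketch (the helper below is illustrative, not an existing API in this repository), a consumer of the Service status would fall back to the Hostname when proxy protocol is enabled, because the IP is intentionally left empty:

```go
package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// lbAddress returns the address of a LoadBalancer Service. With the proxy
// protocol annotation set, the CCM populates only the Hostname
// (<load-balancer-id>.lb.civo.com) and leaves the IP empty.
func lbAddress(svc *corev1.Service) (string, error) {
	ingress := svc.Status.LoadBalancer.Ingress
	if len(ingress) == 0 {
		return "", fmt.Errorf("load balancer for %s/%s is not provisioned yet", svc.Namespace, svc.Name)
	}
	if ingress[0].IP != "" {
		return ingress[0].IP, nil
	}
	return ingress[0].Hostname, nil
}
```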


## Contributing

Bug reports and pull requests are welcome on GitHub at https://github.com/civo/civo-cloud-controller-manager
29 changes: 15 additions & 14 deletions cloud-controller-manager/civo/loadbalancer.go
@@ -111,13 +111,7 @@ func (l *loadbalancer) EnsureLoadBalancer(ctx context.Context, clusterName strin
return nil, err
}

return &v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{
IP: updatedlb.PublicIP,
},
},
}, nil
return lbStatusFor(updatedlb), nil
}

err = createLoadBalancer(ctx, clusterName, service, nodes, l.client.civoClient, l.client.kclient)
@@ -136,13 +130,20 @@ func (l *loadbalancer) EnsureLoadBalancer(ctx context.Context, clusterName strin
return nil, fmt.Errorf("loadbalancer is not yet available, current state: %s", civolb.State)
}

return &v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{
IP: civolb.PublicIP,
},
},
}, nil
return lbStatusFor(civolb), nil
}

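// lbStatusFor maps a Civo load balancer to a Kubernetes LoadBalancerStatus.
// The Hostname (<load-balancer-id>.lb.civo.com) is always populated; the
// public IP is only set when proxy protocol is disabled, so that in-cluster
// traffic keeps flowing through the load balancer when proxy protocol is on.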
func lbStatusFor(civolb *civogo.LoadBalancer) *v1.LoadBalancerStatus {
status := &v1.LoadBalancerStatus{
Ingress: make([]v1.LoadBalancerIngress, 1),
}

if civolb.EnableProxyProtocol == "" {
status.Ingress[0].IP = civolb.PublicIP
}
status.Ingress[0].Hostname = fmt.Sprintf("%s.lb.civo.com", civolb.ID)

return status
}

// UpdateLoadBalancer updates hosts under the specified load balancer.
79 changes: 76 additions & 3 deletions cloud-controller-manager/civo/loadbalancer_test.go
@@ -411,6 +411,7 @@ func TestEnsureLoadBalancer(t *testing.T) {
nodes []*corev1.Node
store []civogo.LoadBalancer
cluster []civogo.KubernetesCluster
setIP bool
err error
}{
{
@@ -473,7 +474,72 @@
},
},
},
err: nil,
setIP: true,
err: nil,
},
{
name: "should not set ip for proxy protocol",
service: &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: corev1.NamespaceDefault,
Annotations: map[string]string{
annotationCivoClusterID: "a32fe5eb-1922-43e8-81bc-7f83b4011334",
annotationCivoLoadBalancerEnableProxyProtocol: "send-proxy",
},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
Ports: []corev1.ServicePort{
{
Name: "http",
Protocol: corev1.ProtocolTCP,
Port: 80,
NodePort: 30000,
},
},
},
},
nodes: []*corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
Status: corev1.NodeStatus{
Addresses: []corev1.NodeAddress{
{
Address: "192.168.1.1",
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
Status: corev1.NodeStatus{
Addresses: []corev1.NodeAddress{
{
Address: "192.168.1.2",
},
},
},
},
},
cluster: []civogo.KubernetesCluster{
{
ID: "a32fe5eb-1922-43e8-81bc-7f83b4011334",
Name: "test",
Instances: []civogo.KubernetesInstance{
{
ID: "11bd4686-5dbf-4e35-b703-75f2864bd6b9",
Hostname: "node1",
},
},
},
},
setIP: false,
err: nil,
},
{
name: "should update an existing load balancer",
Expand Down Expand Up @@ -549,7 +615,8 @@ func TestEnsureLoadBalancer(t *testing.T) {
},
},
},
err: nil,
setIP: true,
err: nil,
},
}

@@ -576,7 +643,13 @@
lbStatus, err := lb.EnsureLoadBalancer(context.Background(), test.cluster[0].Name, test.service, test.nodes)
g.Expect(err).To(BeNil())

g.Expect(lbStatus.Ingress[0].IP).NotTo(BeEmpty())
if test.setIP {
g.Expect(lbStatus.Ingress[0].IP).NotTo(BeEmpty())
} else {
g.Expect(lbStatus.Ingress[0].IP).To(BeEmpty())
}

g.Expect(lbStatus.Ingress[0].Hostname).NotTo(BeEmpty())

svc, err := lb.client.kclient.CoreV1().Services(test.service.Namespace).Get(context.Background(), test.service.Name, metav1.GetOptions{})
if err != nil {
34 changes: 34 additions & 0 deletions e2e/Readme.md
@@ -0,0 +1,34 @@
# e2e Tests

e2e tests for the Civo Cloud Controller Manager.

These tests can be run:

```bash
CIVO_API_KEY=.... go test -timeout 30m -v ./e2e/...
```

Alternatively, `CIVO_API_KEY` can be set in a `.env` file in the root of the project (see `.env.sample`).

## Prerequisites

A civo.com account is required, and you will need to retrieve your API key from the Civo web dashboard.

## Tests

The general flow for each test is as follows:

1. Provision a new cluster
2. Wait for the cluster to be provisioned
3. Scale down the pre-deployed CCM in the cluster
4. Run the local copy of the CCM
5. Run the e2e tests
6. Delete the cluster

### Node

> WIP

### Loadbalancers

> WIP
163 changes: 163 additions & 0 deletions e2e/loadbalacner_test.go
@@ -0,0 +1,163 @@
package test

import (
"context"
"fmt"
"testing"

. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)

func TestLoadbalacnerBasic(t *testing.T) {

g := NewGomegaWithT(t)

mirrorDeploy, err := deployMirrorPods(e2eTest.tenantClient)
g.Expect(err).ShouldNot(HaveOccurred())

lbls := map[string]string{"app": "mirror-pod"}
// Create a Service of type LoadBalancer
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "echo-pods",
Namespace: "default",
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{Name: "http", Protocol: "TCP", Port: 80, TargetPort: intstr.FromInt(8080)},
{Name: "https", Protocol: "TCP", Port: 443, TargetPort: intstr.FromInt(8443)},
},
Selector: lbls,
Type: "LoadBalancer",
},
}

fmt.Println("Creating Service")
err = e2eTest.tenantClient.Create(context.TODO(), svc)
g.Expect(err).ShouldNot(HaveOccurred())

g.Eventually(func() string {
err = e2eTest.tenantClient.Get(context.TODO(), client.ObjectKeyFromObject(svc), svc)
if len(svc.Status.LoadBalancer.Ingress) == 0 {
return ""
}
return svc.Status.LoadBalancer.Ingress[0].IP
}, "2m", "5s").ShouldNot(BeEmpty())

// Cleanup
err = cleanUp(mirrorDeploy, svc)
g.Expect(err).ShouldNot(HaveOccurred())

g.Eventually(func() error {
return e2eTest.tenantClient.Get(context.TODO(), client.ObjectKeyFromObject(svc), svc)
}, "2m", "5s").ShouldNot(BeNil())
}

func TestLoadbalacnerProxy(t *testing.T) {
g := NewGomegaWithT(t)

_, err := deployMirrorPods(e2eTest.tenantClient)
g.Expect(err).ShouldNot(HaveOccurred())

lbls := map[string]string{"app": "mirror-pod"}
// Create a Service of type LoadBalancer
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "echo-pods",
Namespace: "default",
Annotations: map[string]string{
"kubernetes.civo.com/loadbalancer-enable-proxy-protocol": "send-proxy",
},
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{Name: "http", Protocol: "TCP", Port: 80, TargetPort: intstr.FromInt(8081)},
{Name: "https", Protocol: "TCP", Port: 443, TargetPort: intstr.FromInt(8444)},
},
Selector: lbls,
Type: "LoadBalancer",
},
}

fmt.Println("Creating Service")
err = e2eTest.tenantClient.Create(context.TODO(), svc)
g.Expect(err).ShouldNot(HaveOccurred())

g.Eventually(func() string {
err = e2eTest.tenantClient.Get(context.TODO(), client.ObjectKeyFromObject(svc), svc)
if len(svc.Status.LoadBalancer.Ingress) == 0 {
return ""
}
return svc.Status.LoadBalancer.Ingress[0].Hostname
}, "2m", "5s").ShouldNot(BeEmpty())

/*
// Cleanup
err = cleanUp(mirrorDeploy, svc)
g.Expect(err).ShouldNot(HaveOccurred())

g.Eventually(func() error {
return e2eTest.tenantClient.Get(context.TODO(), client.ObjectKeyFromObject(svc), svc)
}, "2m", "5s").ShouldNot(BeNil())
*/
}

func cleanUp(mirrorDeploy *appsv1.Deployment, svc *corev1.Service) error {
err := e2eTest.tenantClient.Delete(context.TODO(), svc)
if err != nil {
return err
}

return e2eTest.tenantClient.Delete(context.TODO(), mirrorDeploy)
}

func deployMirrorPods(c client.Client) (*appsv1.Deployment, error) {
lbls := map[string]string{"app": "mirror-pod"}
replicas := int32(2)
mirrorDeploy := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "echo-pods",
Namespace: "default",
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: lbls,
},
Replicas: &replicas,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: lbls,
Annotations: map[string]string{
"danm.k8s.io/interfaces": "[{\"tenantNetwork\":\"tenant-vxlan\", \"ip\":\"dynamic\"}]",
},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "mirror-pod",
Image: "dmajrekar/nginx-echo:latest",
ImagePullPolicy: corev1.PullIfNotPresent,
Ports: []corev1.ContainerPort{
{Protocol: "TCP", ContainerPort: 8080},
{Protocol: "TCP", ContainerPort: 8081},
{Protocol: "TCP", ContainerPort: 8443},
{Protocol: "TCP", ContainerPort: 8444},
},
},
},
},
},
},
}

fmt.Println("Creating mirror deployment")
err := c.Create(context.TODO(), mirrorDeploy)
return mirrorDeploy, err

}