-
Notifications
You must be signed in to change notification settings - Fork 881
/
nginx.ts
130 lines (123 loc) · 4.15 KB
/
nginx.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
// Copyright 2016-2019, Pulumi Corporation. All rights reserved.
import * as eks from "@pulumi/eks";
import * as k8s from "@pulumi/kubernetes";
import * as input from "@pulumi/kubernetes/types/input";
import * as pulumi from "@pulumi/pulumi";
import * as nginxIngCntlr from "./nginx-ing-cntlr";
// Creates the NGINX Ingress Controller.
// Arguments for creating the NGINX Ingress Controller Deployment and Service.
interface NginxArgs {
    // Container image to run for the controller.
    image: pulumi.Input<string>;
    // Number of controller Pod replicas.
    replicas: pulumi.Input<number>;
    // Namespace to deploy the controller into.
    namespace: pulumi.Input<string>;
    // Ingress class name the controller watches for (e.g. on Ingress resources).
    ingressClass: string;
    // Kubernetes provider to create the resources with.
    provider: k8s.Provider;
    // Instance-type values used by the node affinity to select which nodes
    // the controller Pods may be scheduled on.
    nodeSelectorTermValues: pulumi.Input<string>[];
}
/**
 * Creates the NGINX Ingress Controller: a Deployment (via nginx-ing-cntlr)
 * pinned to nodes of the given instance types, plus the LoadBalancer Service
 * that fronts it.
 *
 * @param name Base name for the resources; also used as the `app` label value.
 * @param args Controller image/replicas/namespace/ingress class, the provider,
 *     and the instance types to schedule onto.
 * @returns The LoadBalancer Service fronting the controller.
 */
export function create(
    name: string,
    args: NginxArgs,
): k8s.core.v1.Service {
    // Define the Node affinity to target for the NGINX Deployment.
    const affinity: input.core.v1.Affinity = {
        // Target the Pods to run on nodes that match the labels for the node
        // selector.
        nodeAffinity: {
            requiredDuringSchedulingIgnoredDuringExecution: {
                nodeSelectorTerms: [
                    {
                        matchExpressions: [
                            {
                                // Use the GA label: "beta.kubernetes.io/instance-type"
                                // was deprecated in k8s 1.17 and removed in 1.25, so
                                // a required affinity on it matches no modern nodes.
                                key: "node.kubernetes.io/instance-type",
                                operator: "In",
                                values: args.nodeSelectorTermValues,
                            },
                        ],
                    },
                ],
            },
        },
        // Don't co-locate running Pods with matching labels on the same node,
        // and spread them per the node hostname.
        podAntiAffinity: {
            requiredDuringSchedulingIgnoredDuringExecution: [
                {
                    topologyKey: "kubernetes.io/hostname",
                    labelSelector: {
                        matchExpressions: [
                            {
                                key: "app",
                                operator: "In",
                                values: [ name ],
                            },
                        ],
                    },
                },
            ],
        },
    };

    // Tolerate the `nginx=true:NoSchedule` taint so the Pods can land on the
    // dedicated (tainted) NGINX nodes.
    const tolerations: input.core.v1.Toleration[] = [
        {
            key: "nginx",
            value: "true",
            effect: "NoSchedule",
        },
    ];

    // Create the controller Deployment with the affinity/tolerations above.
    const deployment = nginxIngCntlr.create(name, {
        replicas: args.replicas,
        image: args.image,
        labels: {app: name},
        namespace: args.namespace,
        ingressClass: args.ingressClass,
        affinity: affinity,
        tolerations: tolerations,
        provider: args.provider,
    });

    // Front the Deployment with a LoadBalancer Service selected by the same
    // `app` label.
    const service = createService(name, {
        labels: { app: name },
        namespace: args.namespace,
        provider: args.provider,
    });

    return service;
}
// Create the LoadBalancer Service to front the NGINX Ingress Controller.
// Arguments for creating the LoadBalancer Service that fronts NGINX.
interface NginxServiceArgs {
    // Labels applied to the Service and used as its Pod selector.
    // NOTE(review): typed `any` — presumably a {[key: string]: string} map;
    // consider narrowing, but that would change the exported interface.
    labels: pulumi.Input<any>;
    // Namespace to create the Service in.
    namespace: pulumi.Input<string>;
    // Kubernetes provider to create the Service with.
    provider: k8s.Provider;
}
/**
 * Creates the LoadBalancer Service that fronts the NGINX Ingress Controller.
 *
 * @param name Fixed name to give the Service (deliberately not auto-named).
 * @param args Labels (also the Pod selector), namespace, and provider.
 * @returns The LoadBalancer Service resource.
 */
export function createService(
    name: string,
    args: NginxServiceArgs,
): k8s.core.v1.Service {
    // AWS ELB connection-draining annotations: enable draining and allow
    // in-flight requests up to 60 seconds to complete before deregistration.
    const drainingAnnotations: pulumi.Input<{[key: string]: pulumi.Input<string>}> = {
        "service.beta.kubernetes.io/aws-load-balancer-connection-draining-enabled": "true",
        "service.beta.kubernetes.io/aws-load-balancer-connection-draining-timeout": "60",
    };

    const serviceArgs = {
        metadata: {
            // NGINX service name is fixed vs auto-named & ref'd in order for
            // nginx-ing-cntlr arg --publish-service to work.
            name: name,
            labels: args.labels,
            namespace: args.namespace,
            annotations: drainingAnnotations,
        },
        spec: {
            type: "LoadBalancer",
            ports: [{port: 80, protocol: "TCP", targetPort: "http"}],
            selector: args.labels,
        },
    };

    return new k8s.core.v1.Service(name, serviceArgs, {provider: args.provider});
}