Skip to content

Commit

Permalink
update terraform and add k8s files
Browse files Browse the repository at this point in the history
Signed-off-by: Pavel Nikolov <[email protected]>
  • Loading branch information
pavelnikolov committed Jul 29, 2024
1 parent 10eb916 commit ee1c988
Show file tree
Hide file tree
Showing 7 changed files with 205 additions and 10 deletions.
17 changes: 16 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,22 @@ Some functional tests have been added which test the process of registration, at

### Client and Server setup

Assuming that Docker is present on your machine, the client and the server can be started by running `docker compose up`. Alternatively, if Docker is not available, one can always run the binaries using `cargo` like this:
Assuming that Docker is present on your machine, the client and the server can be started using the `docker-compose.yaml` file:

```bash
$ docker compose up
[+] Running 2/0
✔ Container zkp-auth-server-1 Created 0.0s
✔ Container zkp-auth-client-1 Created 0.0s
Attaching to client-1, server-1
server-1 | Listening for connections on 0.0.0.0:50051
client-1 | Registration successful.
client-1 | Received challenge from server.
client-1 | Successfully logged in! Session ID: OooJ8n7FOOU1ZyhxOqfBhsvK5x4mwdP7
client-1 exited with code 0
```

Alternatively, if Docker is not available, one can always run the binaries using `cargo` like this:

* Run `cargo run --bin zkpauth-server` in one terminal; and then
* Run `cargo run --bin zkpauth-client` in another terminal
Expand Down
41 changes: 41 additions & 0 deletions k8s/client.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# One-shot Kubernetes Job that runs the zkpauth client against the server.
apiVersion: batch/v1
kind: Job
metadata:
  name: app
  # NOTE(review): this namespace differs from server.yaml ("zkpauth") and from
  # kustomization.yaml ("zkp-auth") — confirm which namespace is intended.
  namespace: zkpauth-client
  labels:
    app: app
spec:
  template:
    metadata:
      labels:
        app: app
    spec:
      affinity:
        # Hard anti-affinity: refuse to schedule the client on any node that
        # already hosts a pod labelled app=server in the "zkpauth" namespace,
        # keyed by hostname (i.e. one node = one topology domain).
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - server
              namespaces:
                - zkpauth
              topologyKey: "kubernetes.io/hostname"
      containers:
        - name: app
          # Tag is replaced by the "images" transformer in k8s/kustomization.yaml.
          image: ghcr.io/pavelnikolov/zkpauth-client:overridden-later
          env:
            # Cross-namespace DNS form <service>.<namespace>; assumes a Service
            # named "server" exists in the "zkpauth" namespace — TODO confirm,
            # no Service manifest is visible in this changeset.
            - name: SERVER_ADDR
              value: "http://server.zkpauth:50051"
            - name: CLIENT_ID
              value: "client"
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
            limits:
              cpu: 100m
              memory: 100Mi
      # Failed runs are retried via new pods (Job semantics), never by
      # restarting the same container.
      restartPolicy: Never
15 changes: 15 additions & 0 deletions k8s/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

# Force every resource into the "zkpauth" namespace. This must match the
# namespace hardcoded inside the manifests it transforms — the client's
# SERVER_ADDR ("http://server.zkpauth:50051") and its pod anti-affinity
# "namespaces: [zkpauth]" entry. The previous value "zkp-auth" did not match,
# so after the transformer ran the client could never resolve the server's
# DNS name.
namespace: zkpauth

resources:
- server.yaml
- client.yaml

# Pin which image tag is deployed; CI can rewrite newTag per release.
# NOTE(review): "latest" is mutable — consider pinning a digest or version tag.
images:
- name: ghcr.io/pavelnikolov/zkpauth-server
  newName: ghcr.io/pavelnikolov/zkpauth-server
  newTag: latest
- name: ghcr.io/pavelnikolov/zkpauth-client
  newName: ghcr.io/pavelnikolov/zkpauth-client
  newTag: latest
34 changes: 34 additions & 0 deletions k8s/server.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# Deployment running a single replica of the zkpauth gRPC server.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: server
  namespace: zkpauth
  labels:
    app: server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: server
  template:
    metadata:
      labels:
        app: server
    spec:
      restartPolicy: Always
      containers:
        - name: server
          # Tag is replaced by the "images" transformer in k8s/kustomization.yaml.
          image: ghcr.io/pavelnikolov/zkpauth-server:overridden-later
          ports:
            - name: grpc
              containerPort: 50051
          env:
            - name: LISTEN_ADDR
              value: "0.0.0.0:50051"
          # NOTE(review): the client dials "server.zkpauth:50051" but no Service
          # manifest is visible in this changeset — confirm a Service named
          # "server" exposing port 50051 exists in the "zkpauth" namespace.
          resources:
            requests:
              cpu: 100m
              memory: 100Mi
            limits:
              cpu: 200m
              memory: 200Mi
80 changes: 73 additions & 7 deletions terraform/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,24 @@ provider "aws" {
region = var.aws_region
}


# VPC hosting the EKS cluster; the /16 leaves room for the per-AZ /24
# subnets defined below.
resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

resource "aws_subnet" "main" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
# Subnet in AZ "a" of the configured region (EKS requires subnets in at
# least two availability zones).
resource "aws_subnet" "az_a" {
  vpc_id            = aws_vpc.main.id
  cidr_block        = "10.0.1.0/24"
  availability_zone = format("%s%s", var.aws_region, "a")

  tags = {
    # NOTE(review): az_b uses the same Name tag ("main"), making the two
    # subnets indistinguishable in the console — confirm this is intended.
    Name = "main"
  }
}

resource "aws_subnet" "az_b" {
vpc_id = aws_vpc.main.id
cidr_block = "10.0.2.0/24"
availability_zone = format("%s%s", var.aws_region, "b")

tags = {
Name = "main"
Expand Down Expand Up @@ -53,8 +63,32 @@ resource "aws_iam_role_policy_attachment" "cluster_policy" {
role = aws_iam_role.cluster_role.name
}

# Optionally, enable Security Groups for Pods
# Reference: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html
# IAM role assumed by the EKS worker-node EC2 instances.
# NOTE(review): naming is CamelCase while sibling resources (cluster_role,
# cluster_policy) are snake_case; renaming now would change the Terraform
# state address and break references, so it is documented rather than fixed.
resource "aws_iam_role" "NodeGroupRole" {
  name = "EKSNodeGroupRole"
  # Trust policy: only the EC2 service may assume this role.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = "sts:AssumeRole"
        Effect = "Allow"
        Principal = {
          Service = "ec2.amazonaws.com"
        }
      },
    ]
  })
}

# Lets the VPC CNI plugin running on the nodes manage pod networking
# (ENIs / secondary IPs).
resource "aws_iam_role_policy_attachment" "AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.NodeGroupRole.name
}

# Baseline permissions worker nodes need to join and operate in the cluster.
# NOTE(review): AmazonEC2ContainerRegistryReadOnly is not attached; images in
# this repo come from ghcr.io, so that may be deliberate — confirm.
resource "aws_iam_role_policy_attachment" "AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.NodeGroupRole.name
}

resource "aws_iam_role_policy_attachment" "vpc_resource_controller_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
role = aws_iam_role.cluster_role.name
Expand All @@ -65,7 +99,7 @@ resource "aws_eks_cluster" "default" {
role_arn = aws_iam_role.cluster_role.arn

vpc_config {
subnet_ids = [aws_subnet.main.id]
subnet_ids = [aws_subnet.az_a.id, aws_subnet.az_b.id]
}

# Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
Expand All @@ -75,3 +109,35 @@ resource "aws_eks_cluster" "default" {
aws_iam_role_policy_attachment.vpc_resource_controller_policy,
]
}

# Managed node group backing the cluster: fixed at two nodes spread across
# the two per-AZ subnets.
resource "aws_eks_node_group" "cluster_node_group" {
  cluster_name    = aws_eks_cluster.default.name
  node_group_name = "${terraform.workspace}-cluster-node_group"
  node_role_arn   = aws_iam_role.NodeGroupRole.arn
  subnet_ids      = [aws_subnet.az_a.id, aws_subnet.az_b.id]

  # desired == min == max, so autoscaling is effectively disabled.
  scaling_config {
    desired_size = 2
    max_size     = 2
    min_size     = 2
  }

  ami_type       = "AL2_x86_64"
  # NOTE(review): t3.micro allows only a handful of pods per node due to
  # ENI/IP limits, which system pods (CNI, kube-proxy, CoreDNS) can exhaust —
  # confirm this instance size is sufficient for the workload.
  instance_types = ["t3.micro"]
  capacity_type  = "ON_DEMAND"
  disk_size      = 20

  # Node permissions must exist before nodes launch, and must outlive the
  # group on destroy so instances can detach cleanly.
  depends_on = [
    aws_iam_role_policy_attachment.AmazonEKSWorkerNodePolicy,
    aws_iam_role_policy_attachment.AmazonEKS_CNI_Policy
  ]
}

# Use managed addons in order to make it easier to upgrade the Kubernetes
# version in future. One aws_eks_addon resource is created per entry in
# var.cluster_addons, keyed by addon name.
resource "aws_eks_addon" "addons" {
  for_each      = { for addon in var.cluster_addons : addon.name => addon }
  cluster_name  = aws_eks_cluster.default.name
  addon_name    = each.value.name
  addon_version = each.value.version
  # service_account_role_arn is intentionally NOT set: it requires an IAM role
  # whose trust policy targets the cluster's OIDC provider (IRSA). The
  # previously-passed cluster role trusts eks.amazonaws.com and cannot be
  # assumed by addon service accounts. With the argument omitted, addons fall
  # back to the node instance role, which carries the required CNI policy.
}
6 changes: 4 additions & 2 deletions terraform/output.tf
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
# API server endpoint of the EKS cluster, used when building a kubeconfig.
output "endpoint" {
  value     = aws_eks_cluster.default.endpoint
  sensitive = true
}

# Base64-encoded cluster CA bundle for the kubeconfig
# certificate-authority-data field.
output "kubeconfig-certificate-authority-data" {
  value     = aws_eks_cluster.default.certificate_authority[0].data
  sensitive = true
}
22 changes: 22 additions & 0 deletions terraform/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,25 @@ variable "aws_region" {
default = "eu-central-1"
type = string
}

# EKS managed addons to install; versions are pinned so Kubernetes upgrades
# are deliberate rather than implicit.
variable "cluster_addons" {
  description = "List of EKS managed addons (name and pinned version) to install on the cluster."
  type = list(object({
    name    = string
    version = string
  }))

  default = [
    {
      name    = "kube-proxy"
      version = "v1.30.0-eksbuild.3"
    },
    {
      name    = "vpc-cni"
      version = "v1.18.2-eksbuild.1"
    },
    {
      name    = "coredns"
      version = "v1.11.1-eksbuild.9"
    }
  ]
}

0 comments on commit ee1c988

Please sign in to comment.