playbook and roles for knative #71

Draft · wants to merge 1 commit into master
25 changes: 25 additions & 0 deletions install-k8s-knative.yml
- name: Install Runtime and Kubernetes
  hosts:
    - masters
    - workers
  roles:
    - runtime
    - download-k8s
    - install-k8s

- name: Install networking - calico
  hosts: masters
  roles:
    - install-calico

- name: Install nfs client
  hosts: masters
  roles:
    - install-nfs-client

- name: Post install configurations
  hosts:
    - masters
    - workers
  roles:
    - post-install
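For orientation, a minimal sketch of an inventory this playbook could run against. The masters and workers group names come from the plays above; the host names, addresses, and the bastion_ip, nfs_directory, registry_domain_name, and node_type values (consumed by the roles and templates below) are placeholders, not part of this PR.

# inventory.ini -- hypothetical example; hosts and values are placeholders
[masters]
master0 ansible_host=192.168.0.10

[workers]
worker0 ansible_host=192.168.0.11

[masters:vars]
node_type=master

[workers:vars]
node_type=worker

[all:vars]
bastion_ip=10.10.10.60
nfs_directory=/ifs/kubernetes
registry_domain_name=registry.example.com

With such an inventory, the whole chain runs as: ansible-playbook -i inventory.ini install-k8s-knative.yml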
7 changes: 7 additions & 0 deletions roles/install-nfs-client/files/class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME
parameters:
  archiveOnDelete: "false"
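To illustrate how the class is consumed, a sketch of a PersistentVolumeClaim that would bind through this provisioner; the claim name and size are made up.

# test-claim.yaml -- hypothetical claim against managed-nfs-storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi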
39 changes: 39 additions & 0 deletions roles/install-nfs-client/files/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.10.10.60
            - name: NFS_PATH
              value: /ifs/kubernetes
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.10.60
            path: /ifs/kubernetes
65 changes: 65 additions & 0 deletions roles/install-nfs-client/files/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
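Once applied, the cluster-scoped grant can be sanity-checked with kubectl's impersonation support (assuming the default namespace, as in the manifests above):

kubectl auth can-i create persistentvolumes \
  --as=system:serviceaccount:default:nfs-client-provisioner
# expected to print: yes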
22 changes: 22 additions & 0 deletions roles/install-nfs-client/tasks/main.yml
- name: Install nfs client packages
  yum:
    name:
      - nfs-utils
      - nfs4-acl-tools

- name: Copy nfs client resource files to /tmp
  copy:
    src: "{{ item }}"
    dest: /tmp/
  with_fileglob:
    - "*"

- name: Template a nfs client script to /tmp/deploy-nfs-client.sh
  template:
    src: deploy-nfs-client.sh.j2
    dest: /tmp/deploy-nfs-client.sh
    mode: '0644'

- name: Run nfs client deployment script
  shell: bash /tmp/deploy-nfs-client.sh

50 changes: 50 additions & 0 deletions roles/install-nfs-client/templates/deploy-nfs-client.sh.j2
#!/bin/bash

set -e

if ! command -v wget &> /dev/null; then
    yum install wget -y
fi

if ! command -v yq &> /dev/null; then
    wget https://github.com/mikefarah/yq/releases/download/v4.40.5/yq_linux_ppc64le
    mv yq_linux_ppc64le /usr/bin/yq
    chmod +x /usr/bin/yq
fi

export SVRADDR={{ bastion_ip }}
export LOCALDIR={{ nfs_directory }}

DEPLYAML=/tmp/deployment.yaml

yq eval '
  (.spec.template.spec.containers[0].image = "k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2") |
  (.spec.template.spec.containers[0].env[] | select(.name=="NFS_SERVER").value) = env(SVRADDR) |
  (.spec.template.spec.containers[0].env[] | select(.name=="NFS_PATH").value) = env(LOCALDIR) |
  (.spec.template.spec.volumes[0].nfs.server) = env(SVRADDR) |
  (.spec.template.spec.volumes[0].nfs.path) = env(LOCALDIR)
' -i $DEPLYAML

kubectl create -f /tmp/rbac.yaml
kubectl create -f /tmp/class.yaml
kubectl create -f $DEPLYAML

for i in {1..20}; do
    echo "Waiting for pods to start...."
    sleep 6s
    if [[ $(kubectl get pods -l app=nfs-client-provisioner -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') == "True" ]]; then
        echo "Pods started."
        break
    elif [[ $i == 20 ]]; then
        echo "Pods didn't start after 120s."
        exit 1
    fi
done

kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
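After the script completes, the class should be marked as the cluster default; a quick check (output shape is illustrative):

kubectl get storageclass managed-nfs-storage
# NAME                            PROVISIONER
# managed-nfs-storage (default)   fuseim.pri/ifs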
7 changes: 7 additions & 0 deletions roles/post-install/files/config.json
{
  "auths": {
    "icr.io": {
      "auth": "<auth-token>"
    }
  }
}
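The auth value here is the standard dockerconfigjson credential, i.e. base64 of user:password; for IBM Container Registry (icr.io) the user is typically iamapikey. A sketch of producing it, with the API key as a placeholder:

echo -n 'iamapikey:<api-key>' | base64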
6 changes: 6 additions & 0 deletions roles/post-install/files/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 0
debug: false
pull-image-on-create: false
disable-pull-on-run: false
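Assuming this file lands at /etc/crictl.yaml (crictl's default config path; the tasks in this PR don't show the copy step), it lets crictl talk to containerd directly:

crictl ps             # list running containers over the containerd socket
crictl pull busybox   # pull an image through the CRI image endpoint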
4 changes: 4 additions & 0 deletions roles/post-install/files/hosts.toml
server = "https://{{ registry_domain_name }}"

[host."http://{{ registry_domain_name }}"]
capabilities = ["pull", "resolve", "push"]
30 changes: 30 additions & 0 deletions roles/post-install/tasks/main.yml
- name: Setup containerd creds & certificate
  block:
    - name: Create a directory for registry certificate
      file:
        path: "/etc/containerd/certs.d/{{ registry_domain_name }}"
        state: directory

    - name: Copy ssl certificate file
      copy:
        src: share/ssl.crt
        dest: "/etc/containerd/certs.d/{{ registry_domain_name }}/ssl.crt"

    - name: Create hosts.toml file
      template:
        src: files/hosts.toml
        dest: "/etc/containerd/certs.d/{{ registry_domain_name }}/hosts.toml"

# enable access to registry from kubernetes pods
# TODO: remove dirty patch solution
- name: Create config.json file for master
  when: node_type == "master"
  template:
    src: files/config.json
    dest: "/var/lib/kubelet/config.json"

- name: Create config.json file for worker
  when: node_type == "worker"
  template:
    src: files/config.json
    dest: "/var/lib/kubelet/config.json"
5 changes: 5 additions & 0 deletions roles/post-install/templates/coredns-patch.json.j2
{
  "data": {
    "Corefile": ".:53 {\n errors\n health {\n lameduck 5s\n }\n ready\n kubernetes cluster.local in-addr.arpa ip6.arpa {\n pods insecure\n fallthrough in-addr.arpa ip6.arpa\n ttl 30\n }\n hosts {\n {{ bastion_ip }} {{ cluster_domain_name }} {{ registry_domain_name }}\n fallthrough\n }\n prometheus :9153\n forward . /etc/resolv.conf {\n max_concurrent 1000\n }\n cache 30\n loop\n reload\n loadbalance\n}\n"
  }
}
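The diff doesn't show where this template is applied; typically a patch like this is pushed into the coredns ConfigMap and the deployment restarted, e.g. (hypothetical path and invocation):

kubectl -n kube-system patch configmap coredns --patch-file /tmp/coredns-patch.json
kubectl -n kube-system rollout restart deployment coredns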
5 changes: 5 additions & 0 deletions roles/post-install/templates/hosts.toml.j2
server = "http://{{ registry_domain_name }}"

[host."http://{{ registry_domain_name }}"]
capabilities = ["pull", "resolve", "push"]
ca = "/etc/containerd/certs.d/{{ registry_domain_name }}/ssl.crt"