# Copyright 2012-2021 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
# README FIRST
# You need a reasonably powerful bare metal machine: 4 or more cores, 32 GB of RAM and 500 GB of free disk space. This assumes a fresh install of Ubuntu Server (20.04 or higher) on the machine.
# A bare metal machine is needed because nesting multiple layers of VMs either won't work or will have performance problems.
# Note: this tutorial has not been tested on versions prior to 20.04.
# clone the git repository
cd ~
git clone https://github.com/antongisli/maas-baremetal-k8s-tutorial.git
###
### Set up NAT for the local network so VMs/bare metal can see the Internet
###
# get local interface name (this assumes a single default route is present)
export INTERFACE=$(ip route | grep default | cut -d ' ' -f 5)
export IP_ADDRESS=$(ip -4 addr show dev $INTERFACE | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
sudo sysctl -p
sudo iptables -t nat -A POSTROUTING -o $INTERFACE -j SNAT --to $IP_ADDRESS
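# Optional check (my addition, not in the original script): confirm the SNAT rule took.
sudo iptables -t nat -L POSTROUTING -n -v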
#TODO inbound port forwarding/load balancing (a DNAT example appears in the Kubernetes section below)
# Persist NAT configuration
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
sudo apt-get install iptables-persistent -y
###
### Install/configure LXD
###
# install lxd (the refresh covers the case where the snap was preinstalled)
sudo snap install --channel=latest/stable lxd
sudo snap refresh --channel=latest/stable lxd
# LXD init, preseeded from the config in the cloned repo
sudo lxd init --preseed < maas-baremetal-k8s-tutorial/lxd.conf
# Wait for LXD to be ready
sudo lxd waitready
# verify LXD network config
lxc network list
lxc network show br0
lxc profile show default
#test by making a container
lxc launch ubuntu:22.04 test-container
#test by making a VM
lxc launch ubuntu:22.04 test-vm --vm
# visit your VM
lxc exec test-vm -- /bin/bash
#clean up
lxc delete test-container
lxc delete test-vm
# note: there is a known lxd/maas compatibility issue; if you hit it, upgrade either lxd or maas to 3.1
sudo snap install jq
sudo snap install maas
sudo snap install maas-test-db
# Initialise MAAS
sudo maas init region+rack --database-uri maas-test-db:/// --maas-url http://${IP_ADDRESS}:5240/MAAS
# Sleep for a while to let MAAS do what it needs to do.
sleep 30
# Create MAAS admin and grab API key
sudo maas createadmin --username admin --password admin --email admin
export APIKEY=$(sudo maas apikey --username admin)
# MAAS admin login
maas login admin 'http://localhost:5240/MAAS/' $APIKEY
# Configure MAAS networking (set gateway, vlans, DHCP on, etc.). If you encounter errors
# here, it might be because MAAS hasn't finished initialising; wait a bit and rerun.
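# Optional (a sketch of mine, not in the original tutorial): instead of waiting
# and rerunning by hand, poll the API until it responds.
until maas admin subnets read >/dev/null 2>&1
do
echo "waiting for the MAAS API..."
sleep 10
done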
export SUBNET=10.10.10.0/24
export FABRIC_ID=$(maas admin subnet read "$SUBNET" | jq -r ".vlan.fabric_id")
export VLAN_TAG=$(maas admin subnet read "$SUBNET" | jq -r ".vlan.vid")
export PRIMARY_RACK=$(maas admin rack-controllers read | jq -r ".[] | .system_id")
maas admin subnet update $SUBNET gateway_ip=10.10.10.1
maas admin ipranges create type=dynamic start_ip=10.10.10.200 end_ip=10.10.10.254
maas admin vlan update $FABRIC_ID $VLAN_TAG dhcp_on=True primary_rack=$PRIMARY_RACK
maas admin maas set-config name=upstream_dns value=8.8.8.8
#
#
# GO TO THE WEB INTERFACE AND DO THE INITIAL SETUP.
# SELECT KVM -> LXD FROM THE MENU ON THE LEFT.
# CLICK "ADD LXD HOST" IN THE UPPER RIGHT-HAND CORNER.
# MANUALLY INSTALL THE CERTIFICATE PRESENTED TO YOU, USING THE COMMAND IT GIVES YOU.
#
# Add LXD as a VM host for MAAS and capture the VM_HOST_ID
export VM_HOST_ID=$(maas admin vm-hosts create password=password type=lxd power_address=https://${IP_ADDRESS}:8443 \
project=maas | jq '.id')
# allow high CPU oversubscription so all VMs can use all cores
maas admin vm-host update $VM_HOST_ID cpu_over_commit_ratio=4
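# Optional check (my addition, not in the original): read back the over-commit setting.
maas admin vm-host read $VM_HOST_ID | jq '.cpu_over_commit_ratio'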
# create tags for MAAS
maas admin tags create name=juju-controller comment='This tag should be applied to machines that will be used as juju controllers'
maas admin tags create name=metal comment='This tag should be applied to machines that will be used as bare metal'
### creating VMs for Juju controller and our "bare metal"
# add a VM for the juju controller with minimal memory
maas admin vm-host compose $VM_HOST_ID cores=8 memory=2048 architecture="amd64/generic" \
storage="main:16(pool1)" hostname="juju-controller"
# get the system-id and tag the machine with "juju-controller"
export JUJU_SYSID=$(maas admin machines read | jq -r '.[]
| select(."hostname"=="juju-controller")
| .["system_id"]')
maas admin tag update-nodes "juju-controller" add=$JUJU_SYSID
### If you have real bare metal nodes, enlist them into MAAS via the GUI instead.
## Here we create 3 "bare metal" machines (as VMs) and tag them with "metal"
for ID in 1 2 3
do
maas admin vm-host compose $VM_HOST_ID cores=8 memory=8192 architecture="amd64/generic" \
storage="main:25(pool1),ceph:100(pool1)" hostname="metal-${ID}"
SYSID=$(maas admin machines read | jq -r --arg MACHINE "metal-${ID}" '.[]
| select(."hostname"==$MACHINE)
| .["system_id"]')
maas admin tag update-nodes "metal" add=$SYSID
done
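# Optional sanity check (my addition; assumes the machine JSON carries a
# "tag_names" list, as MAAS normally reports): show what got tagged "metal".
maas admin machines read | jq -r '.[] | select(.tag_names | index("metal")) | .hostname'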
### Juju setup (note, this section requires manual intervention)
cd ~
sudo snap install juju --classic
sed -i "s/IP_ADDRESS/$IP_ADDRESS/" maas-baremetal-k8s-tutorial/maas-cloud.yaml
juju add-cloud maas-cloud maas-baremetal-k8s-tutorial/maas-cloud.yaml
juju add-credential maas-cloud
juju clouds --local
juju credentials
# Bootstrap the maas-cloud - get a coffee
juju bootstrap maas-cloud --bootstrap-constraints "tags=juju-controller mem=2G"
juju add-model default
# fire up the juju gui to view the fun
# if it's a remote machine, you can use an SSH tunnel to get access to it:
# e.g. ssh <user>@<your-server> -L8080:10.10.10.2:17070
juju dashboard
# get coffee
# check juju's view of machines
juju machines
# add machines to juju from the maas cloud
# it will grab the 3 we already created since they are in a "Ready" state
for ID in 1 2 3
do
juju add-machine
done
# take a look at machines list again, should see 3 machines
juju machines
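# A programmatic wait (my sketch, not in the original tutorial; assumes the
# usual "juju machines --format json" layout):
until [ "$(juju machines --format json | jq '[.machines[] | select(."juju-status".current=="started")] | length')" -eq 3 ]
do
echo "waiting for machines to start..."
sleep 30
done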
#
## NOTE: The above added all the machines to the "default" model, which is sort of
## fine for an example, but we're going to create a model specific to kubernetes.
## Since you can't add the same machine to two models at once, let's free them up.
for ID in 0 1 2
do
juju remove-machine --force $ID
done
### Ceph
# create a VM on the MAAS server to carry the Ceph overhead (OSD+Mon+MDS)
maas admin vm-host compose $VM_HOST_ID cores=8 memory=16384 architecture="amd64/generic" storage="main:16(pool1)" hostname="ceph-server"
maas admin tags create name=ceph comment='This tag should be applied to the VM that serves as a ceph OSD+Mon+MDS'
export CEPH_SYSID=$(maas admin machines read | jq -r '.[]
| select(."hostname"=="ceph-server")
| .["system_id"]')
maas admin tag update-nodes "ceph" add=$CEPH_SYSID
lxc config device add "ceph-server" sdb disk source=/dev/sdb
# create a model to contain machines and apps for ceph+k8s.
juju add-model ceph-k8s
# add the above machines to juju, using their hostnames to control the order
# in which they are added (ceph-server becomes machine 0)
#juju add-machine --constraints "tags=ceph"
juju add-machine ceph-server
juju add-machine metal-1
juju add-machine metal-2
# deploy ceph-mon into LXD containers on each of the machines
juju deploy -n 3 ceph-mon --to lxd:0,lxd:1,lxd:2
# deploy ceph-osd directly to the machines
juju deploy --config maas-baremetal-k8s-tutorial/ceph-osd.yaml ceph-osd -n 3 --to 0,1,2
# relate ceph-mon and ceph-osd
juju relate ceph-osd:mon ceph-mon:osd
# watch the fun (with another coffee).
watch -c juju status --color
# Wait for Ceph to settle before proceeding
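# One programmatic way to wait (my sketch, not from the original tutorial):
# block until ceph reports HEALTH_OK.
until juju ssh ceph-mon/0 -- sudo ceph health 2>/dev/null | grep -q HEALTH_OK
do
echo "waiting for ceph to settle..."
sleep 30
done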
# If the deployment hangs because one or more of the disks was a RAID member:
### Wipe a disk which formerly belonged to a RAID.
juju ssh <machine>   # ssh to the machine with the offending disk
# the RAID superblock can live at either end of the disk, so zero both ends:
sudo dd if=/dev/zero of=/dev/sdb bs=512 count=1024
DEVSZ=$(sudo blockdev --getsz /dev/sdb)
sudo dd if=/dev/zero of=/dev/sdb bs=512 seek=$((DEVSZ - 1024)) count=1024
### END of wiping disk from a former RAID
#
juju ssh ceph-mon/0 -- sudo ceph status
juju run-action ceph-mon/0 list-pools --wait
### Kubernetes
# Deploy kubernetes-core with juju and re-use existing machines.
juju deploy kubernetes-core --map-machines=existing,0=0,1=1
# fetch the kubernetes config from the master, so we can add k8s as a cloud to juju
mkdir -p ~/.kube
juju scp kubernetes-master/0:/home/ubuntu/config ~/.kube/config
# add storage relations
juju add-relation ceph-mon:admin kubernetes-master
juju add-relation ceph-mon:client kubernetes-master
# add k8s to juju (choose option 1, client only)
juju add-k8s my-k8s
juju bootstrap my-k8s
juju controllers
### Deploy a test application on K8s cluster
# Create a model in juju, which creates a namespace in K8s
juju add-model hello-kubecon
# Deploy the charm "hello-kubecon", and set a hostname for the ingress
juju deploy hello-kubecon --config juju-external-hostname=kubecon.test
# Deploy the ingress integrator - this is a helper to set up the ingress
juju deploy nginx-ingress-integrator ingress
# trust the ingress (it needs cluster credentials to make changes)
juju trust ingress --scope=cluster
# Relate our app to the ingress - this causes the ingress to be set up
juju relate hello-kubecon ingress
# Explore the setup
kubectl describe ingress -n hello-kubecon
kubectl get svc -n hello-kubecon
kubectl describe svc hello-kubecon-service -n hello-kubecon
kubectl get pods -n hello-kubecon
# Lastly, in order to be able to reach the service from outside our host machine,
# we can use port forwarding. Replace 10.10.10.5 with the IP seen on the ingress.
sudo iptables -t nat -A PREROUTING -p tcp -i $INTERFACE \
--dport 8000 -j DNAT --to-destination 10.10.10.5:80
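# Instead of reading the IP off the ingress by hand, you can look it up (a
# sketch of mine; assumes kubectl populates the loadBalancer status for this ingress):
INGRESS_IP=$(kubectl get ingress -n hello-kubecon -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}')
echo $INGRESS_IP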
# if you want to persist this, run sudo dpkg-reconfigure iptables-persistent
# Now you should be able to open a browser and navigate to http://$IP_ADDRESS:8000
# scale our kubernetes cluster - find a machine
# Avoid kubernetes-master or existing kubernetes-worker machines
# https://discourse.charmhub.io/t/scaling-applications/1075
juju switch maas-cloud-default
juju status
# add a kubernetes-worker
juju add-unit kubernetes-worker --to 2
# add another kubecon unit
juju switch my-k8s
juju add-unit -n 1 hello-kubecon
juju status
# what happened to the ingress?
kubectl get ingress -n hello-kubecon
# exercise for the reader - iptables round robin or MetalLB :)
# scale down hello-kubecon
juju remove-unit --num-units 1 hello-kubecon
# scale down kubernetes
juju switch maas-cloud-default
juju remove-unit kubernetes-worker/1
juju status
# if you want to test destroying your hello-kubecon:
juju switch my-k8s
juju destroy-model hello-kubecon --release-storage
# if you want to destroy your kubernetes controller for juju:
juju switch maas-cloud-default
juju destroy-controller my-k8s
# if you want to remove your k8s cluster:
juju switch maas-cloud-default
juju remove-application kubernetes-master kubernetes-worker etcd flannel easyrsa
# if you want to remove ceph
juju switch maas-cloud-default
juju remove-application ceph-mon ceph-osd
# To clean up everything:
juju destroy-controller -y --destroy-all-models --destroy-storage maas-cloud-default
# And the machines created in MAAS can be deleted easily in the MAAS GUI.
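# They can also be removed from the CLI (a sketch of mine, reusing the jq
# pattern from above to delete by hostname):
for H in juju-controller metal-1 metal-2 metal-3 ceph-server
do
SYSID=$(maas admin machines read | jq -r --arg H "$H" '.[] | select(.hostname==$H) | .system_id')
maas admin machine delete $SYSID
done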
### Reference materials and notes
# https://jaas.ai/ceph-base
# https://jaas.ai/canonical-kubernetes/bundle/471
# https://medium.com/swlh/kubernetes-external-ip-service-type-5e5e9ad62fcd
# https://charmhub.io/nginx-ingress-integrator
# https://drive.google.com/file/d/1estQna40vz4uS5tBd9CvKdILdwAmcNFH/view - hello-kubecon
# https://ubuntu.com/kubernetes/docs/troubleshooting - troubleshooting
# https://juju.is/blog/deploying-mattermost-and-kubeflow-on-kubernetes-with-juju-2-9
### END