From 26f21300285ba09b21887016e265c5dc352afc65 Mon Sep 17 00:00:00 2001 From: Jacob Weinstock Date: Fri, 12 Jan 2024 19:40:07 -0700 Subject: [PATCH] Refactor into packages Signed-off-by: Jacob Weinstock --- go.mod | 1 - go.sum | 2 - go.work.sum | 25 + playground/Process.md | 15 - playground/cmd/create.go | 306 +++--- playground/cmd/delete.go | 17 +- playground/go.mod | 2 +- playground/internal/capi/capi.go | 146 +++ playground/internal/docker/docker.go | 143 +++ playground/internal/docker/vbmc.go | 101 ++ playground/internal/helm/helm.go | 49 + playground/internal/kind/kind.go | 54 ++ playground/internal/kubectl/kubectl.go | 124 +++ playground/internal/kubectl/kubectl_test.go | 58 ++ playground/internal/libvirt/libvirt.go | 35 + playground/internal/tinkerbell/marshal.go | 51 + .../{internal.go => tinkerbell/tinkerbell.go} | 47 +- playground/main.go | 870 ------------------ 18 files changed, 965 insertions(+), 1081 deletions(-) create mode 100644 playground/internal/capi/capi.go create mode 100644 playground/internal/docker/docker.go create mode 100644 playground/internal/docker/vbmc.go create mode 100644 playground/internal/helm/helm.go create mode 100644 playground/internal/kind/kind.go create mode 100644 playground/internal/kubectl/kubectl.go create mode 100644 playground/internal/kubectl/kubectl_test.go create mode 100644 playground/internal/libvirt/libvirt.go create mode 100644 playground/internal/tinkerbell/marshal.go rename playground/internal/{internal.go => tinkerbell/tinkerbell.go} (74%) diff --git a/go.mod b/go.mod index c276c3fc..f97a566b 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,6 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.11 // indirect - github.com/peterbourgon/ff/v3 v3.4.0 github.com/prometheus/client_golang v1.17.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect 
github.com/prometheus/common v0.45.0 // indirect diff --git a/go.sum b/go.sum index 45ff78bf..e66c8073 100644 --- a/go.sum +++ b/go.sum @@ -150,8 +150,6 @@ github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/peterbourgon/ff/v3 v3.4.0 h1:QBvM/rizZM1cB0p0lGMdmR7HxZeI/ZrBWB4DqLkMUBc= -github.com/peterbourgon/ff/v3 v3.4.0/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= diff --git a/go.work.sum b/go.work.sum index fc7b0934..0204eb9a 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,10 +1,12 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Jeffail/gabs/v2 v2.7.0/go.mod h1:dp5ocw1FvBBQYssgHsG7I1WYsiLRtkUaB1FEtSwvNUw= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/VictorLowther/simplexml v0.0.0-20180716164440-0bff93621230/go.mod h1:t2EzW1qybnPDQ3LR/GgeF0GOzHUXT5IVMLP2gkW1cmc= github.com/VictorLowther/soap v0.0.0-20150314151524-8e36fca84b22/go.mod h1:/B7V22rcz4860iDqstGvia/2+IYWXf3/JdQCVd/1D2A= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/avast/retry-go v3.0.0+incompatible/go.mod 
h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= @@ -15,8 +17,13 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg= +github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -29,6 +36,7 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
+github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= @@ -69,11 +77,16 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0= +github.com/opencontainers/image-spec v1.1.0-rc4/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= @@ -87,15 
+100,25 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stmcginnis/gofish v0.14.1-0.20230920133920-77490fd98fa2/go.mod h1:BLDSFTp8pDlf/xDbLZa+F7f7eW0E/CHCboggsu8CznI= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0/go.mod h1:5z+/ZWJQKXa9YT34fQNx5K8Hd1EoIhvtUygUQPqEOgQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr 
v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20230127130021-4ca2cb1a16b7 h1:o7Ps2IYdzLRolS9/nadqeMSHpa9k8pu8u+VKBFUG7cQ= @@ -103,6 +126,7 @@ golang.org/x/exp v0.0.0-20230127130021-4ca2cb1a16b7/go.mod h1:CxIveKay+FTh1D0yPZ golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -123,4 +147,5 @@ k8s.io/component-base v0.28.1 h1:LA4AujMlK2mr0tZbQDZkjWbdhTV5bRyEyAFe0TJxlWg= k8s.io/component-base v0.28.1/go.mod h1:jI11OyhbX21Qtbav7JkhehyBsIRfnO8oEgoAR12ArIU= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +knative.dev/pkg v0.0.0-20211119170723-a99300deff34/go.mod h1:VqUp1KWJqpTDNoiSI/heaX3uMdubImslJE2tBkP+Bbw= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0= diff --git a/playground/Process.md b/playground/Process.md index 207b47b3..da5179e8 100644 --- a/playground/Process.md +++ b/playground/Process.md @@ -105,21 +105,6 @@ for i in {1..4}; do echo $i; docker exec -it virtualbmc vbmc start "node$i"; don kubectl set image deployment/capt-controller-manager -n capt-system 
manager=reg.weinstocklabs.com/tinkerbell/capt-amd64:latest ``` -1. update Rufio CRD: - - ```bash - kubectl delete crd machines.bmc.tinkerbell.org - kubectl delete crd tasks.bmc.tinkerbell.org - kubectl apply -f https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_machines.yaml - kubectl apply -f https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_tasks.yaml - ``` - -1. Apply Hardware, BMC machine, and secret objects. - - ```bash - kubectl apply -f output/apply/ - ``` - 1. Apply CAPI/CAPT cluster objects. ```bash diff --git a/playground/cmd/create.go b/playground/cmd/create.go index 64a89da4..4b9ad2e9 100644 --- a/playground/cmd/create.go +++ b/playground/cmd/create.go @@ -8,14 +8,23 @@ import ( "log" "net" "net/netip" + "net/url" "os" - "os/exec" "path/filepath" + "strconv" "strings" + "time" "github.com/peterbourgon/ff/v3" "github.com/peterbourgon/ff/v3/ffcli" - "github.com/tinkerbell/cluster-api-provider/playground/internal" + "github.com/tinkerbell/cluster-api-provider/playground/internal/capi" + "github.com/tinkerbell/cluster-api-provider/playground/internal/docker" + "github.com/tinkerbell/cluster-api-provider/playground/internal/helm" + "github.com/tinkerbell/cluster-api-provider/playground/internal/kind" + "github.com/tinkerbell/cluster-api-provider/playground/internal/kubectl" + "github.com/tinkerbell/cluster-api-provider/playground/internal/libvirt" + "github.com/tinkerbell/cluster-api-provider/playground/internal/tinkerbell" + "gopkg.in/yaml.v3" ) type Create struct { @@ -38,7 +47,7 @@ type Create struct { // SSHPublicKeyFile is the file location of the SSH public key that will be added to all control plane and worker nodes in the workload cluster SSHPublicKeyFile string // nodeData holds data for each node that will be created - nodeData []internal.NodeData + nodeData []tinkerbell.NodeData rootConfig *rootConfig kubeconfig string } @@ -53,11 +62,10 @@ func NewCreateCommand(rc 
*rootConfig) *ffcli.Command { ShortUsage: "create the CAPT playground [flags]", Options: []ff.Option{ff.WithEnvVarPrefix("CAPT_PLAYGROUND")}, FlagSet: fs, - Exec: func(context.Context, []string) error { + Exec: func(ctx context.Context, _ []string) error { println("create") - fmt.Printf("create: %+v\n", c.rootConfig) - return nil + return c.exec(ctx) }, } } @@ -68,9 +76,9 @@ func (c *Create) registerFlags(fs *flag.FlagSet) { fs.IntVar(&c.TotalHardware, "total-hardware", 4, "number of hardware CR that will be created in the management cluster") fs.IntVar(&c.ControlPlaneNodes, "control-plane-nodes", 1, "number of control plane nodes that will be created in the workload cluster") fs.IntVar(&c.WorkerNodes, "worker-nodes", 2, "number of worker nodes that will be created in the workload cluster") - fs.StringVar(&c.KubernetesVersion, "kubernetes-version", "v1.20.5", "version of Kubernetes that will be used to create the workload cluster") + fs.StringVar(&c.KubernetesVersion, "kubernetes-version", "v1.23.5", "version of Kubernetes that will be used to create the workload cluster") fs.StringVar(&c.Namespace, "namespace", "capt-playground", "namespace to use for all Objects created") - fs.StringVar(&c.TinkerbellStackVersion, "tinkerbell-stack-version", "v0.5.0", "version of the Tinkerbell stack that will be deployed to the management cluster") + fs.StringVar(&c.TinkerbellStackVersion, "tinkerbell-stack-version", "0.4.2", "version of the Tinkerbell stack that will be deployed to the management cluster") fs.StringVar(&c.SSHPublicKeyFile, "ssh-public-key-file", "", "file location of the SSH public key that will be added to all control plane and worker nodes in the workload cluster") } @@ -84,28 +92,51 @@ func (c *Create) exec(ctx context.Context) error { pwd = "./" } c.kubeconfig = filepath.Join(pwd, c.OutputDir, "kind.kubeconfig") + + st := struct { + ClusterName string `yaml:"clusterName"` + OutputDir string `yaml:"outputDir"` + TotalHardware int `yaml:"totalHardware"` + }{ + 
ClusterName: c.ClusterName, + OutputDir: c.OutputDir, + TotalHardware: c.TotalHardware, + } + d, err := yaml.Marshal(st) + if err != nil { + return fmt.Errorf("failed to write state file: %w", err) + } + if err := os.WriteFile(c.rootConfig.StateFile, d, 0644); err != nil { + return fmt.Errorf("failed to write state file: %w", err) + } // We need the docker network created first so that other containers and VMs can connect to it. log.Println("create kind cluster") - if err := c.createKindCluster(); err != nil { + if err := kind.CreateCluster(ctx, kind.Args{Name: "playground", Kubeconfig: c.kubeconfig}); err != nil { return fmt.Errorf("error creating kind cluster: %w", err) } // This runs before creating the data slice so that we can get the IP of the Virtual BMC container. + vbmc := docker.VirtualBMC{ + Network: "kind", + ContainerName: "virtualbmc", + LibvirtSocket: "/var/run/libvirt/libvirt-sock", + Image: "capt-playground:v2", + } log.Println("Start Virtual BMC") - vbmcIP, err := startVirtualBMC("kind") + vbmcIP, err := vbmc.RunVirtualBMCContainer(context.Background()) if err != nil { - log.Fatalf("error starting Virtual BMC: %s", err) + return fmt.Errorf("error starting Virtual BMC: %s", err) } // get the gateway of the kind network - gateway, err := getGateway("kind") + gateway, err := docker.IPv4GatewayFrom("kind") if err != nil { - log.Fatalf("error getting gateway: %s", err) + return fmt.Errorf("error getting gateway: %s", err) } - subnet, err := getSubnet("kind") + subnet, err := docker.IPv4SubnetFrom("kind") if err != nil { - log.Fatalf("error getting subnet: %s", err) + return fmt.Errorf("error getting subnet: %s", err) } log.Println("Populating node data") @@ -114,133 +145,101 @@ func (c *Create) exec(ctx context.Context) error { log.Println("deploy Tinkerbell stack") base := fmt.Sprintf("%v.%v.100", vbmcIP.As4()[0], vbmcIP.As4()[1]) // x.x.100 tinkerbellVIP := fmt.Sprintf("%v.%d", base, 101) // x.x.100.101 - if err := 
c.deployTinkerbellStack(tinkerbellVIP, c.Namespace); err != nil { - log.Fatalf("error deploying Tinkerbell stack: %s", err) + if err := c.deployTinkerbellStack(tinkerbellVIP); err != nil { + return fmt.Errorf("error deploying Tinkerbell stack: %s", err) } log.Println("creating Tinkerbell Custom Resources") if err := writeYamls(c.nodeData, c.OutputDir, c.Namespace); err != nil { - log.Fatalf("error writing yamls: %s", err) + return fmt.Errorf("error writing yamls: %s", err) } - return nil -} - -func (c *Create) createKindCluster() error { - /* - kind create cluster --name --kubeconfig /kind.kubeconfig - */ - cmd := "kind" - args := []string{"create", "cluster", "--name", c.ClusterName, "--kubeconfig", c.kubeconfig} - e := exec.CommandContext(context.Background(), cmd, args...) - out, err := e.CombinedOutput() + log.Println("create VMs") + bridge, err := docker.LinuxBridgeFrom("kind") if err != nil { - return fmt.Errorf("error creating kind cluster: %s: out: %v", err, string(out)) + return fmt.Errorf("error during VM creation: %w", err) + } + for _, d := range c.nodeData { + d := d + if err := libvirt.CreateVM(d.Hostname, bridge, d.MACAddress); err != nil { + return fmt.Errorf("error during VM creation: %w", err) + } } - return nil -} - -func startVirtualBMC(dockerNet string) (netip.Addr, error) { - /* - docker run -d --rm --network kind -v /var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock --name virtualbmc capt-playground:v2 - */ - cmd := "docker" - args := []string{ - "run", "-d", "--rm", - "--network", dockerNet, - "-v", "/var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro", - "-v", "/var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock", - "--name", "virtualbmc", - "capt-playground:v2", - } - e := exec.CommandContext(context.Background(), cmd, args...) 
- out, err := e.CombinedOutput() - if err != nil { - return netip.Addr{}, fmt.Errorf("error starting Virtual BMC: %s: out: %v", err, string(out)) + log.Println("starting Virtual BMCs") + for _, d := range c.nodeData { + n := docker.BMCInfo{ + Username: d.BMCUsername, + Password: d.BMCPassword, + Hostname: d.Hostname, + Port: fmt.Sprintf("%d", d.BMCIP.Port()), + } + vbmc.BMCInfo = append(vbmc.BMCInfo, n) } - // get the IP of the container - args = []string{ - "inspect", "-f", "'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'", "virtualbmc", + log.Println("starting Virtual BMCs") + if err := vbmc.RegisterVirtualBMC(context.Background()); err != nil { + return fmt.Errorf("error starting Virtual BMCs: %s", err) } - e = exec.CommandContext(context.Background(), cmd, args...) - out, err = e.CombinedOutput() - if err != nil { - return netip.Addr{}, fmt.Errorf("error getting Virtual BMC IP: %s: out: %v", err, string(out)) + if err := vbmc.StartVirtualBMC(context.Background()); err != nil { + return fmt.Errorf("error starting Virtual BMCs: %s", err) } - o := strings.Trim(strings.Trim(string(out), "\n"), "'") - ip, err := netip.ParseAddr(o) - if err != nil { - return netip.Addr{}, fmt.Errorf("error parsing Virtual BMC IP: %s: out: %v", err, string(out)) + log.Println("update Rufio CRDs") + args := kubectl.Args{ + Cmd: "delete", + AdditionalPrefixArgs: []string{"crd", "machines.bmc.tinkerbell.org", "tasks.bmc.tinkerbell.org"}, + Kubeconfig: c.kubeconfig, } - - return ip, nil -} - -func getGateway(dockerNet string) (netip.Addr, error) { - /* - docker network inspect kind -f '{{range .IPAM.Config}}{{.Gateway}},{{end}}' - result: 172.20.0.1, - */ - cmd := "docker" - args := []string{"network", "inspect", dockerNet, "-f", "'{{range .IPAM.Config}}{{.Gateway}},{{end}}'"} - e := exec.CommandContext(context.Background(), cmd, args...) 
- out, err := e.CombinedOutput() - if err != nil { - return netip.Addr{}, fmt.Errorf("error getting gateway: %s: out: %v", err, string(out)) + if _, err := kubectl.RunCommand(context.Background(), args); err != nil { + return fmt.Errorf("error deleting Rufio CRDs: %w", err) + } + rufioCRDs := []string{ + "https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_machines.yaml", + "https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_tasks.yaml", + } + if err := kubectl.ApplyFiles(context.Background(), c.kubeconfig, rufioCRDs); err != nil { + return fmt.Errorf("update Rufio CRDs: %w", err) } - o := strings.Trim(strings.Trim(string(out), "\n"), "'") - subnets := strings.Split(o, ",") - for _, s := range subnets { - ip, err := netip.ParseAddr(s) - if err == nil && ip.Is4() { - return ip, nil - } + log.Println("apply all Tinkerbell manifests") + if err := kubectl.ApplyFiles(context.Background(), c.kubeconfig, []string{filepath.Join(c.OutputDir, "apply") + "/"}); err != nil { + return fmt.Errorf("error applying Tinkerbell manifests: %w", err) } - return netip.Addr{}, fmt.Errorf("unable to determine docker network gateway, err from command: %s: stdout: %v", err, string(out)) -} + log.Println("creating clusterctl.yaml") + if err := capi.ClusterctlYamlToDisk(c.OutputDir); err != nil { + return fmt.Errorf("error creating clusterctl.yaml: %w", err) + } -func getSubnet(dockerNet string) (net.IPMask, error) { - /* - docker network inspect kind -f '{{range .IPAM.Config}}{{.Subnet}},{{end}}' - result: 172.20.0.0/16,fc00:f853:ccd:e793::/64, - */ - cmd := "docker" - args := []string{"network", "inspect", dockerNet, "-f", "'{{range .IPAM.Config}}{{.Subnet}},{{end}}'"} - e := exec.CommandContext(context.Background(), cmd, args...) 
- out, err := e.CombinedOutput() - if err != nil { - return nil, fmt.Errorf("error getting subnet: %s: out: %v", err, string(out)) + log.Println("running clusterctl init") + if err := capi.ClusterctlInit(c.OutputDir, c.kubeconfig, tinkerbellVIP); err != nil { + return fmt.Errorf("error running clusterctl init: %w", err) } - o := strings.Trim(strings.Trim(string(out), "\n"), "'") - subnets := strings.Split(o, ",") - for _, s := range subnets { - _, ipnet, err := net.ParseCIDR(s) - if err == nil { - if ipnet.IP.To4() != nil { - return ipnet.Mask, nil - } - } + log.Println("running clusterctl generate cluster") + podCIDR := fmt.Sprintf("%v.100.0.0/16", vbmcIP.As4()[0]) // x.100.0.0/16 (172.25.0.0/16) + controlPlaneVIP := fmt.Sprintf("%v.%d", base, 100) // x.x.100.100 + if err := capi.ClusterYamlToDisk(c.OutputDir, c.ClusterName, c.Namespace, strconv.Itoa(c.ControlPlaneNodes), strconv.Itoa(c.WorkerNodes), c.KubernetesVersion, controlPlaneVIP, podCIDR, c.kubeconfig); err != nil { + return fmt.Errorf("error running clusterctl generate cluster: %w", err) + } + if err := kubectl.KustomizeClusterYaml(c.OutputDir, c.ClusterName, c.kubeconfig, c.SSHPublicKeyFile, capi.KustomizeYaml, c.Namespace, string(CAPTRole)); err != nil { + return fmt.Errorf("error running kustomize: %w", err) + } - return nil, fmt.Errorf("unable to determine docker network subnet mask, err from command: %s: stdout: %v", err, string(out)) + return nil } -func (c *Create) populateNodeData(vbmcIP netip.Addr, subnet net.IPMask, gateway netip.Addr) []internal.NodeData { +func (c *Create) populateNodeData(vbmcIP netip.Addr, subnet net.IPMask, gateway netip.Addr) []tinkerbell.NodeData { // Use the vbmcIP in order to determine the subnet for the KinD network. // This is used to create the IP addresses for the VMs, Tinkerbell stack LB IP, and the KubeAPI server VIP.
base := fmt.Sprintf("%v.%v.100", vbmcIP.As4()[0], vbmcIP.As4()[1]) // x.x.100 - nd := make([]internal.NodeData, c.TotalHardware) + nd := make([]tinkerbell.NodeData, c.TotalHardware) curControlPlaneNodesCount := 0 curWorkerNodesCount := 0 for i := 0; i < c.TotalHardware; i++ { num := i + 1 - d := internal.NodeData{ + d := tinkerbell.NodeData{ Hostname: fmt.Sprintf("node%v", num), MACAddress: net.HardwareAddr{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, Nameservers: []string{"8.8.8.8", "1.1.1.1"}, @@ -282,53 +281,74 @@ func GenerateRandMAC() (net.HardwareAddr, error) { return buf, nil } -func (c *Create) deployTinkerbellStack(tinkVIP string, namespace string) error { +func (c *Create) deployTinkerbellStack(tinkVIP string) error { /* trusted_proxies=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}') LB_IP=x.x.x.x helm install tink-stack oci://ghcr.io/tinkerbell/charts/stack --version "$STACK_CHART_VERSION" --create-namespace --namespace tink-system --wait --set "smee.trustedProxies={${trusted_proxies}}" --set "hegel.trustedProxies={${trusted_proxies}}" --set "stack.loadBalancerIP=$LB_IP" --set "smee.publicIP=$LB_IP" */ - var trustedProxies string + var trustedProxies []string + timeout := time.NewTimer(time.Minute) +LOOP: for { - cmd := "kubectl" - args := []string{"get", "nodes", "-o", "jsonpath='{.items[*].spec.podCIDR}'"} - e := exec.CommandContext(context.Background(), cmd, args...) - e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() + select { + case <-timeout.C: + return fmt.Errorf("unable to get node cidrs after 1 minute") + default: + } + /* + cmd := "kubectl" + args := []string{"get", "nodes", "-o", "jsonpath='{.items[*].spec.podCIDR}'"} + e := exec.CommandContext(context.Background(), cmd, args...) 
+ e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} + out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error getting trusted proxies: %s: out: %v", err, string(out)) + } + // strip quotes + trustedProxies = strings.Trim(string(out), "'") + */ + cidrs, err := kubectl.GetNodeCidrs(context.Background(), c.kubeconfig) if err != nil { - return fmt.Errorf("error getting trusted proxies: %s: out: %v", err, string(out)) + return fmt.Errorf("error getting node cidrs: %w", err) } - // strip quotes - trustedProxies = strings.Trim(string(out), "'") - v, _, _ := net.ParseCIDR(trustedProxies) - if v != nil { - break + for _, c := range cidrs { + v, cdr, _ := net.ParseCIDR(c) + if v != nil { + trustedProxies = append(trustedProxies, cdr.String()) + break LOOP + } } } - cmd := "helm" - args := []string{ - "install", "tink-stack", "oci://ghcr.io/tinkerbell/charts/stack", - "--version", c.TinkerbellStackVersion, - "--create-namespace", "--namespace", namespace, - "--wait", - "--set", fmt.Sprintf("smee.trustedProxies={%s}", trustedProxies), - "--set", fmt.Sprintf("hegel.trustedProxies={%s}", trustedProxies), - "--set", fmt.Sprintf("stack.loadBalancerIP=%s", tinkVIP), - "--set", fmt.Sprintf("smee.publicIP=%s", tinkVIP), - "--set", "rufio.image=quay.io/tinkerbell/rufio:latest", - } - e := exec.CommandContext(context.Background(), cmd, args...) 
- e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error deploying Tinkerbell stack: %s: out: %v", err, string(out)) + a := helm.Args{ + ReleaseName: "tink-stack", + Chart: &url.URL{ + Scheme: "oci", + Host: "ghcr.io", + Path: "/tinkerbell/charts/stack", + }, + Version: c.TinkerbellStackVersion, + CreateNamespace: true, + Namespace: c.Namespace, + Wait: true, + SetArgs: map[string]string{ + "smee.trustedProxies": fmt.Sprintf("{%s}", strings.Join(trustedProxies, ",")), + "hegel.trustedProxies": fmt.Sprintf("{%s}", strings.Join(trustedProxies, ",")), + "stack.loadBalancerIP": tinkVIP, + "smee.publicIP": tinkVIP, + "rufio.image": "quay.io/tinkerbell/rufio:latest", + }, + Kubeconfig: c.kubeconfig, + } + if err := helm.Install(context.Background(), a); err != nil { + return fmt.Errorf("error deploying Tinkerbell stack: %w", err) } return nil } -func writeYamls(ds []internal.NodeData, outputDir string, namespace string) error { +func writeYamls(ds []tinkerbell.NodeData, outputDir string, namespace string) error { p := filepath.Join(outputDir, "apply") if err := os.MkdirAll(p, 0755); err != nil && !os.IsExist(err) { return err @@ -338,9 +358,9 @@ func writeYamls(ds []internal.NodeData, outputDir string, namespace string) erro name string data []byte }{ - {name: fmt.Sprintf("hardware-%s.yaml", d.Hostname), data: internal.MarshalOrEmpty(d.Hardware(namespace))}, - {name: fmt.Sprintf("bmc-machine-%s.yaml", d.Hostname), data: internal.MarshalOrEmpty(d.BMCMachine(namespace))}, - {name: fmt.Sprintf("bmc-secret-%s.yaml", d.Hostname), data: internal.MarshalOrEmpty(d.BMCSecret(namespace))}, + {name: fmt.Sprintf("hardware-%s.yaml", d.Hostname), data: tinkerbell.MarshalOrEmpty(d.Hardware(namespace))}, + {name: fmt.Sprintf("bmc-machine-%s.yaml", d.Hostname), data: tinkerbell.MarshalOrEmpty(d.BMCMachine(namespace))}, + {name: fmt.Sprintf("bmc-secret-%s.yaml", d.Hostname), data: 
tinkerbell.MarshalOrEmpty(d.BMCSecret(namespace))}, } for _, yaml := range y { diff --git a/playground/cmd/delete.go b/playground/cmd/delete.go index f2aa2350..fb29048c 100644 --- a/playground/cmd/delete.go +++ b/playground/cmd/delete.go @@ -123,18 +123,29 @@ func deleteVirshNodes(num int) error { args := []string{"-c", "qemu:///system", "destroy", fmt.Sprintf("node%d", i)} e := exec.CommandContext(context.Background(), cmd, args...) out, err := e.CombinedOutput() - if err != nil && !strings.Contains(string(out), "Domain not found") { - return fmt.Errorf("error destroying virsh node, command: `%v %v`, err: %w: output: %s", cmd, strings.Join(args, " "), err, out) + if err != nil && !contains(strings.ToLower(string(out)), []string{"domain not found", "failed to get domain", "domain is not running"}) { + // return fmt.Errorf("error destroying virsh node, command: `%v %v`, err: %w: output: %s", cmd, strings.Join(args, " "), err, out) + continue } // remove the VM and any disks associated with it args = []string{"-c", "qemu:///system", "undefine", fmt.Sprintf("node%d", i), "--remove-all-storage", "--nvram"} e = exec.CommandContext(context.Background(), cmd, args...) 
out, err = e.CombinedOutput() - if err != nil { + if err != nil && !contains(strings.ToLower(string(out)), []string{"domain not found", "failed to get domain"}) { return fmt.Errorf("error removing virsh node: command: `%v %v`, err: %w: output: %s", cmd, strings.Join(args, " "), err, out) } } return nil } + +func contains(s string, substrs []string) bool { + for _, sub := range substrs { + if strings.Contains(s, sub) { + return true + } + } + + return false +} diff --git a/playground/go.mod b/playground/go.mod index 174508f7..d4d653e8 100644 --- a/playground/go.mod +++ b/playground/go.mod @@ -6,6 +6,7 @@ require ( github.com/peterbourgon/ff/v3 v3.4.0 github.com/tinkerbell/rufio v0.3.2 github.com/tinkerbell/tink v0.9.0 + gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 @@ -21,7 +22,6 @@ require ( golang.org/x/net v0.17.0 // indirect golang.org/x/text v0.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/controller-runtime v0.16.2 // indirect diff --git a/playground/internal/capi/capi.go b/playground/internal/capi/capi.go new file mode 100644 index 00000000..8a2156c3 --- /dev/null +++ b/playground/internal/capi/capi.go @@ -0,0 +1,146 @@ +package capi + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" +) + +const ( + binary = "clusterctl" + clusterctlYaml = "clusterctl.yaml" +) + +func ClusterctlYamlToDisk(outputDir string) error { + contents := fmt.Sprintf(`providers: + - name: "tinkerbell" + url: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases/v%v/infrastructure-components.yaml" + type: "InfrastructureProvider"`, "0.4.0") + + return os.WriteFile(filepath.Join(outputDir, clusterctlYaml), []byte(contents), 0644) +} + +func ClusterctlInit(outputDir, kubeconfig, tinkerbellVIP string) error { + /* + TINKERBELL_IP=172.18.18.18 clusterctl 
--config output/clusterctl.yaml init --infrastructure tinkerbell + */ + + args := []string{"init", "--config", filepath.Join(outputDir, clusterctlYaml), "--infrastructure", "tinkerbell"} + e := exec.CommandContext(context.Background(), binary, args...) + e.Env = []string{ + fmt.Sprintf("TINKERBELL_IP=%s", tinkerbellVIP), + fmt.Sprintf("KUBECONFIG=%s", kubeconfig), + "XDG_CONFIG_HOME=/tmp/xdg", + "XDG_CONFIG_DIRS=/tmp/xdg", + "XDG_STATE_HOME=/tmp/xdg", + "XDG_CACHE_HOME=/tmp/xdg", + "XDG_RUNTIME_DIR=/tmp/xdg", + "XDG_DATA_HOME=/tmp/xdg", + "XDG_DATA_DIRS=/tmp/xdg", + } + out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error running clusterctl init: %s: out: %v", err, string(out)) + } + + return nil +} + +func ClusterYamlToDisk(outputDir, clusterName, namespace, cpNodeNum, workerNodeNum, k8sVer, cpVIP, podCIDR, kubeconfig string) error { + /* + CONTROL_PLANE_VIP=172.18.18.17 POD_CIDR=172.25.0.0/16 clusterctl generate cluster playground --config outputDir/clusterctl.yaml --kubernetes-version v1.23.5 --control-plane-machine-count=1 --worker-machine-count=2 --target-namespace=tink-system --write-to playground.yaml + */ + args := []string{ + "generate", "cluster", clusterName, + "--config", filepath.Join(outputDir, "clusterctl.yaml"), + "--kubernetes-version", fmt.Sprintf("%v", k8sVer), + fmt.Sprintf("--control-plane-machine-count=%v", cpNodeNum), + fmt.Sprintf("--worker-machine-count=%v", workerNodeNum), + fmt.Sprintf("--target-namespace=%v", namespace), + "--write-to", filepath.Join(outputDir, fmt.Sprintf("%v.yaml", clusterName)), + } + e := exec.CommandContext(context.Background(), binary, args...) 
+ e.Env = []string{ + fmt.Sprintf("CONTROL_PLANE_VIP=%s", cpVIP), + fmt.Sprintf("POD_CIDR=%v", podCIDR), + fmt.Sprintf("KUBECONFIG=%s", kubeconfig), + "XDG_CONFIG_HOME=/tmp/xdg", + "XDG_CONFIG_DIRS=/tmp/xdg", + "XDG_STATE_HOME=/tmp/xdg", + "XDG_CACHE_HOME=/tmp/xdg", + "XDG_RUNTIME_DIR=/tmp/xdg", + "XDG_DATA_HOME=/tmp/xdg", + "XDG_DATA_DIRS=/tmp/xdg", + } + out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error running clusterctl generate cluster: %s: out: %v", err, string(out)) + } + return nil +} + +var KustomizeYaml = `apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: {{.Namespace}} +resources: + - playground.yaml +patches: + - target: + group: infrastructure.cluster.x-k8s.io + kind: TinkerbellMachineTemplate + name: ".*control-plane.*" + version: v1beta1 + patch: |- + - op: add + path: /spec/template/spec + value: + hardwareAffinity: + required: + - labelSelector: + matchLabels: + {{ .NodeLabel }}: control-plane + - target: + group: infrastructure.cluster.x-k8s.io + kind: TinkerbellMachineTemplate + name: ".*worker.*" + version: v1beta1 + patch: |- + - op: add + path: /spec/template/spec + value: + hardwareAffinity: + required: + - labelSelector: + matchLabels: + {{ .NodeLabel }}: worker +{{- if .SSHAuthorizedKey }} + - target: + group: bootstrap.cluster.x-k8s.io + kind: KubeadmConfigTemplate + name: "playground-.*" + version: v1beta1 + patch: |- + - op: add + path: /spec/template/spec/users + value: + - name: tink + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - {{ .SSHAuthorizedKey }} + - target: + group: controlplane.cluster.x-k8s.io + kind: KubeadmControlPlane + name: "playground-.*" + version: v1beta1 + patch: |- + - op: add + path: /spec/kubeadmConfigSpec/users + value: + - name: tink + sudo: ALL=(ALL) NOPASSWD:ALL + sshAuthorizedKeys: + - {{ .SSHAuthorizedKey }} +{{ end -}} +` diff --git a/playground/internal/docker/docker.go b/playground/internal/docker/docker.go new file mode 100644 index 
00000000..791446c6 --- /dev/null +++ b/playground/internal/docker/docker.go @@ -0,0 +1,143 @@ +package docker + +import ( + "context" + "fmt" + "net" + "net/netip" + "os/exec" + "strings" +) + +const binary = "docker" + +type Args struct { + Cmd string + Detach bool + Autoremove bool + Network string + BindMounts map[string]string + Image string + Name string + OutputFormat string + AdditionalPrefixArgs []string + AdditionalSuffixArgs []string +} + +// RunCommand runs a docker command with the given args +func RunCommand(ctx context.Context, c Args) (string, error) { + cmd := binary + args := []string{c.Cmd} + args = append(args, c.AdditionalPrefixArgs...) + if c.Name != "" { + args = append(args, "--name", c.Name) + } + if c.Detach { + args = append(args, "-d") + } + if c.Autoremove { + args = append(args, "--rm") + } + if c.Network != "" { + args = append(args, "--network", c.Network) + } + for hostPath, containerPath := range c.BindMounts { + args = append(args, "-v", fmt.Sprintf("%s:%s", hostPath, containerPath)) + } + if c.OutputFormat != "" { + args = append(args, "--format", c.OutputFormat) + } + if c.Image != "" { + args = append(args, c.Image) + } + args = append(args, c.AdditionalSuffixArgs...) + + e := exec.CommandContext(context.Background(), cmd, args...) 
+ out, err := e.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to run container: cmd: %v err: %w: out: %s", fmt.Sprintf("[%v %v]", cmd, strings.Join(args, " ")), err, out) + } + + return string(out), nil +} + +// IPv4SubnetFrom returns the subnet mask from the given docker network +func IPv4SubnetFrom(dockerNet string) (net.IPMask, error) { + /* + docker network inspect kind -f '{{range .IPAM.Config}}{{.Subnet}},{{end}}' + result: 172.20.0.0/16,fc00:f853:ccd:e793::/64, + */ + args := Args{ + Cmd: "network", + OutputFormat: "'{{range .IPAM.Config}}{{.Subnet}},{{end}}'", + AdditionalPrefixArgs: []string{"inspect", dockerNet}, + } + out, err := RunCommand(context.Background(), args) + if err != nil { + return nil, fmt.Errorf("error getting subnet: %s: out: %v", err, string(out)) + } + + o := strings.Trim(strings.Trim(string(out), "\n"), "'") + subnets := strings.Split(o, ",") + for _, s := range subnets { + _, ipnet, err := net.ParseCIDR(s) + if err == nil { + if ipnet.IP.To4() != nil { + return ipnet.Mask, nil + } + } + } + + return nil, fmt.Errorf("unable to determine docker network subnet mask, err from command: %s: stdout: %v", err, string(out)) +} + +func IPv4GatewayFrom(dockerNet string) (netip.Addr, error) { + /* + docker network inspect kind -f '{{range .IPAM.Config}}{{.Gateway}},{{end}}' + result: 172.20.0.1, + */ + args := Args{ + Cmd: "network", + OutputFormat: "'{{range .IPAM.Config}}{{.Gateway}},{{end}}'", + AdditionalPrefixArgs: []string{"inspect", dockerNet}, + } + out, err := RunCommand(context.Background(), args) + if err != nil { + return netip.Addr{}, fmt.Errorf("error getting gateway: %w", err) + } + + o := strings.Trim(strings.Trim(string(out), "\n"), "'") + subnets := strings.Split(o, ",") + for _, s := range subnets { + ip, err := netip.ParseAddr(s) + if err == nil && ip.Is4() { + return ip, nil + } + } + + return netip.Addr{}, fmt.Errorf("unable to determine docker network gateway, err from command: %s: stdout: %v", err, 
string(out)) +} + +func LinuxBridgeFrom(dockerNet string) (string, error) { + /* + network_id=$(docker network inspect -f {{.Id}} kind) + bridge_name="br-${network_id:0:11}" + brctl show $bridge_name + */ + args := Args{ + Cmd: "network", + OutputFormat: "'{{.Id}}'", + AdditionalPrefixArgs: []string{"inspect"}, + AdditionalSuffixArgs: []string{dockerNet}, + } + out, err := RunCommand(context.Background(), args) + if err != nil { + return "", fmt.Errorf("error getting network id: %w", err) + } + bridgeID := string(out)[:13] + bridgeID = strings.Trim(bridgeID, "'") + bridgeName := fmt.Sprintf("br-%s", bridgeID) + // TODO: check if bridge exists + + return bridgeName, nil +} diff --git a/playground/internal/docker/vbmc.go b/playground/internal/docker/vbmc.go new file mode 100644 index 00000000..44dd13bd --- /dev/null +++ b/playground/internal/docker/vbmc.go @@ -0,0 +1,101 @@ +package docker + +import ( + "context" + "fmt" + "net/netip" + "strings" +) + +type VirtualBMC struct { + Image string + Network string + ContainerName string + LibvirtSocket string + BMCInfo []BMCInfo +} + +type BMCInfo struct { + Username string + Password string + Hostname string + Port string +} + +func (v VirtualBMC) RunVirtualBMCContainer(ctx context.Context) (netip.Addr, error) { + /* + docker run -d --rm --network kind -v /var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock --name virtualbmc capt-playground:v2 + */ + args := Args{ + Cmd: "run", + Detach: true, + Network: v.Network, + Autoremove: true, + BindMounts: map[string]string{ + fmt.Sprintf("%s-ro", v.LibvirtSocket): "/var/run/libvirt/libvirt-sock-ro", + v.LibvirtSocket: "/var/run/libvirt/libvirt-sock", + }, + Name: v.ContainerName, + Image: v.Image, + } + if _, err := RunCommand(context.Background(), args); err != nil { + return netip.Addr{}, err + } + + // get the IP of the container + args = Args{ + Cmd: "inspect", + OutputFormat: "'{{range 
.NetworkSettings.Networks}}{{.IPAddress}}{{end}}'", + AdditionalSuffixArgs: []string{"virtualbmc"}, + } + out, err := RunCommand(context.Background(), args) + if err != nil { + return netip.Addr{}, err + } + + o := strings.Trim(strings.Trim(string(out), "\n"), "'") + ip, err := netip.ParseAddr(o) + if err != nil { + return netip.Addr{}, fmt.Errorf("error parsing Virtual BMC IP: %s: out: %v", err, string(out)) + } + + return ip, nil +} + +func (v VirtualBMC) RegisterVirtualBMC(ctx context.Context) error { + for _, bmc := range v.BMCInfo { + args := Args{ + Cmd: "exec", + AdditionalPrefixArgs: []string{ + v.ContainerName, + "vbmc", "add", + "--username", bmc.Username, + "--password", bmc.Password, + "--port", bmc.Port, + bmc.Hostname, + }, + } + if _, err := RunCommand(ctx, args); err != nil { + return err + } + } + + return nil +} + +func (v VirtualBMC) StartVirtualBMC(ctx context.Context) error { + /* + docker exec virtualbmc vbmc start node1 + */ + for _, bmc := range v.BMCInfo { + args := Args{ + Cmd: "exec", + AdditionalPrefixArgs: []string{v.ContainerName, "vbmc", "start", bmc.Hostname}, + } + if _, err := RunCommand(ctx, args); err != nil { + return err + } + } + + return nil +} diff --git a/playground/internal/helm/helm.go b/playground/internal/helm/helm.go new file mode 100644 index 00000000..6c6fd9de --- /dev/null +++ b/playground/internal/helm/helm.go @@ -0,0 +1,49 @@ +package helm + +import ( + "context" + "fmt" + "net/url" + "os/exec" +) + +const binary = "helm" + +type Args struct { + Cmd string + ReleaseName string + Chart *url.URL + Version string + CreateNamespace bool + Namespace string + Wait bool + SetArgs map[string]string + Kubeconfig string +} + +func Install(ctx context.Context, a Args) error { + args := []string{"install", a.ReleaseName, a.Chart.String()} + if a.Version != "" { + args = append(args, "--version", a.Version) + } + if a.CreateNamespace { + args = append(args, "--create-namespace") + } + if a.Namespace != "" { + args = 
append(args, "--namespace", a.Namespace) + } + if a.Wait { + args = append(args, "--wait") + } + for k, v := range a.SetArgs { + args = append(args, "--set", fmt.Sprintf("%s=%s", k, v)) + } + e := exec.CommandContext(context.Background(), binary, args...) + e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", a.Kubeconfig)} + out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error deploying Tinkerbell stack: %s: out: %v", err, string(out)) + } + + return nil +} diff --git a/playground/internal/kind/kind.go b/playground/internal/kind/kind.go new file mode 100644 index 00000000..fefdfa92 --- /dev/null +++ b/playground/internal/kind/kind.go @@ -0,0 +1,54 @@ +package kind + +import ( + "context" + "fmt" + "os/exec" +) + +const binary = "kind" + +type Args struct { + Name string + Kubeconfig string +} + +func runKindClusterCommand(ctx context.Context, cmd string, c Args) error { + args := []string{cmd, "cluster"} + if c.Name != "" { + args = append(args, "--name", c.Name) + } + if c.Kubeconfig != "" { + args = append(args, "--kubeconfig", c.Kubeconfig) + } + e := exec.CommandContext(context.Background(), binary, args...) 
+ out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error creating kind cluster: %s: out: %v", err, string(out)) + } + + return nil +} + +func CreateCluster(ctx context.Context, c Args) error { + /* + kind create cluster --name playground --kubeconfig /tmp/kubeconfig + */ + args := Args{ + Name: c.Name, + Kubeconfig: c.Kubeconfig, + } + + return runKindClusterCommand(ctx, "create", args) +} + +func DeleteCluster(ctx context.Context, c Args) error { + /* + kind delete cluster --name playground + */ + args := Args{ + Name: c.Name, + } + + return runKindClusterCommand(ctx, "delete", args) +} diff --git a/playground/internal/kubectl/kubectl.go b/playground/internal/kubectl/kubectl.go new file mode 100644 index 00000000..e0c21b57 --- /dev/null +++ b/playground/internal/kubectl/kubectl.go @@ -0,0 +1,124 @@ +package kubectl + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" +) + +const binary = "kubectl" + +type Args struct { + Cmd string + AdditionalPrefixArgs []string + AdditionalSuffixArgs []string + Kubeconfig string +} + +// RunCommand runs a kubectl command with the given args +func RunCommand(ctx context.Context, c Args) (string, error) { + args := []string{c.Cmd} + args = append(args, c.AdditionalPrefixArgs...) + args = append(args, c.AdditionalSuffixArgs...) + + e := exec.CommandContext(context.Background(), binary, args...) 
+ if c.Kubeconfig != "" { + e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.Kubeconfig)} + } + out, err := e.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to run container: cmd: %v err: %w: out: %s", fmt.Sprintf("[%v %v]", binary, strings.Join(args, " ")), err, out) + } + + return string(out), nil +} + +func GetNodeCidrs(ctx context.Context, kubeconfig string) ([]string, error) { + args := Args{ + Cmd: "get", + AdditionalPrefixArgs: []string{"nodes", "-o", "jsonpath={.items[*].spec.podCIDR}"}, + Kubeconfig: kubeconfig, + } + out, err := RunCommand(ctx, args) + if err != nil { + return nil, err + } + + cidrs := strings.Trim(string(out), "'") + return strings.Split(cidrs, " "), nil +} + +func ApplyFiles(ctx context.Context, kubeconfig string, files []string) error { + formatted := []string{} + for _, f := range files { + formatted = append(formatted, "-f", f) + } + + args := Args{ + Cmd: "apply", + AdditionalPrefixArgs: formatted, + Kubeconfig: kubeconfig, + } + _, err := RunCommand(ctx, args) + if err != nil { + return err + } + + return nil +} + +func generateTemplate(d any, tmpl string) (string, error) { + t := template.New("template") + t, err := t.Parse(tmpl) + if err != nil { + return "", err + } + buffer := new(bytes.Buffer) + if err := t.Execute(buffer, d); err != nil { + return "", err + } + + return buffer.String(), nil +} + +func KustomizeClusterYaml(outputDir string, name, kubeconfig string, sshAuthKeyFile string, kustomizeYaml string, namespace string, nodeLabel string) error { + /* + kubectl kustomize -o output/playground.yaml + */ + // get authorized key. 
ignore error if file doesn't exist as authorizedKey will be "" and the template will be unchanged + authorizedKey, _ := os.ReadFile(sshAuthKeyFile) + authorizedKey = []byte(strings.TrimSuffix(string(authorizedKey), "\n")) + s := struct { + SSHAuthorizedKey string + Namespace string + NodeLabel string + }{ + SSHAuthorizedKey: string(authorizedKey), + Namespace: namespace, + NodeLabel: nodeLabel, + } + patch, err := generateTemplate(s, kustomizeYaml) + if err != nil { + return err + } + + // write kustomization.yaml to output dir + if err := os.WriteFile(filepath.Join(outputDir, "kustomization.yaml"), []byte(patch), 0644); err != nil { + return err + } + cmd := "kubectl" + args := []string{"kustomize", outputDir, "-o", filepath.Join(outputDir, name+".yaml")} + e := exec.CommandContext(context.Background(), cmd, args...) + e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", kubeconfig)} + out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error running kubectl kustomize: %s: out: %v", err, string(out)) + } + + return nil +} diff --git a/playground/internal/kubectl/kubectl_test.go b/playground/internal/kubectl/kubectl_test.go new file mode 100644 index 00000000..21533868 --- /dev/null +++ b/playground/internal/kubectl/kubectl_test.go @@ -0,0 +1,58 @@ +package kubectl + +import ( + "context" + "fmt" + "net" + "testing" + "time" +) + +func TestXxx(t *testing.T) { + out, err := GetNodeCidrs(context.Background(), "/home/tink/.kube/config") + if err != nil { + t.Fatalf("error getting trusted proxies: %s: out: %v", err, out) + } + t.Log(out) + t.Fail() +} + +func TestXxx2(t *testing.T) { + var trustedProxies []string + timeout := time.NewTimer(time.Minute) +LOOP: + for { + select { + case <-timeout.C: + t.Fatal(fmt.Errorf("unable to get node cidrs after 1 minute")) + default: + } + /* + cmd := "kubectl" + args := []string{"get", "nodes", "-o", "jsonpath='{.items[*].spec.podCIDR}'"} + e := exec.CommandContext(context.Background(), cmd, args...) 
+ e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} + out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error getting trusted proxies: %s: out: %v", err, string(out)) + } + // strip quotes + trustedProxies = strings.Trim(string(out), "'") + */ + cidrs, err := GetNodeCidrs(context.Background(), "/home/tink/.kube/config") + if err != nil { + t.Fatal(fmt.Errorf("error getting node cidrs: %w", err)) + } + for _, c := range cidrs { + v, ipnet, _ := net.ParseCIDR(c) + if v != nil { + t.Log(v) + t.Log(ipnet) + trustedProxies = append(trustedProxies, ipnet.String()) + break LOOP + } + } + } + t.Log(trustedProxies) + t.Fail() +} diff --git a/playground/internal/libvirt/libvirt.go b/playground/internal/libvirt/libvirt.go new file mode 100644 index 00000000..d27584c4 --- /dev/null +++ b/playground/internal/libvirt/libvirt.go @@ -0,0 +1,35 @@ +package libvirt + +import ( + "context" + "fmt" + "net" + "os/exec" + "strings" +) + +func CreateVM(name string, netBridge string, mac net.HardwareAddr) error { + cmd := "virt-install" + args := []string{ + "--description", "CAPT VM", + "--ram", "2048", + "--vcpus", "2", + "--os-variant", "ubuntu20.04", + "--graphics", "vnc", + "--boot", "uefi,firmware.feature0.name=enrolled-keys,firmware.feature0.enabled=no,firmware.feature1.name=secure-boot,firmware.feature1.enabled=yes", + "--noautoconsole", + "--noreboot", + "--import", + "--connect", "qemu:///system", + } + args = append(args, "--name", name) + args = append(args, "--disk", fmt.Sprintf("path=/tmp/%v-disk.img,bus=virtio,size=10,sparse=yes", name)) + args = append(args, "--network", fmt.Sprintf("bridge:%s,mac=%s", netBridge, mac.String())) + e := exec.CommandContext(context.Background(), cmd, args...) 
+ out, err := e.CombinedOutput() + if err != nil { + return fmt.Errorf("error creating: command: %v: error: %s: out: %v", fmt.Sprintf("%v %v", cmd, strings.Join(args, " ")), err, string(out)) + } + + return nil +} diff --git a/playground/internal/tinkerbell/marshal.go b/playground/internal/tinkerbell/marshal.go new file mode 100644 index 00000000..3ac60af6 --- /dev/null +++ b/playground/internal/tinkerbell/marshal.go @@ -0,0 +1,51 @@ +package tinkerbell + +import ( + "encoding/json" + "fmt" + + "gopkg.in/yaml.v3" +) + +func MarshalOrEmpty(h any) []byte { + b, err := Marshal(&h) + if err != nil { + return []byte{} + } + + return b +} + +// Marshal the object into JSON then convert +// JSON to YAML and returns the YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// JSONToYAML Converts JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. 
+ return yaml.Marshal(jsonObj) +} diff --git a/playground/internal/internal.go b/playground/internal/tinkerbell/tinkerbell.go similarity index 74% rename from playground/internal/internal.go rename to playground/internal/tinkerbell/tinkerbell.go index 1de8a69c..333db5c7 100644 --- a/playground/internal/internal.go +++ b/playground/internal/tinkerbell/tinkerbell.go @@ -1,14 +1,12 @@ -package internal +package tinkerbell import ( - "encoding/json" "fmt" "net" "net/netip" rufio "github.com/tinkerbell/rufio/api/v1alpha1" "github.com/tinkerbell/tink/api/v1alpha1" - "gopkg.in/yaml.v3" corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -141,46 +139,3 @@ func (d NodeData) BMCSecret(namespace string) corev1.Secret { }, } } - -func MarshalOrEmpty(h any) []byte { - b, err := Marshal(&h) - if err != nil { - return []byte{} - } - - return b -} - -// Marshal the object into JSON then convert -// JSON to YAML and returns the YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// JSONToYAML Converts JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. 
- return yaml.Marshal(jsonObj) -} diff --git a/playground/main.go b/playground/main.go index 18e68827..d7702b57 100644 --- a/playground/main.go +++ b/playground/main.go @@ -1,79 +1,16 @@ package main import ( - "bytes" "context" - "crypto/rand" - "encoding/json" "errors" - "flag" "fmt" - "log" - "net" - "net/netip" "os" - "os/exec" "os/signal" - "path/filepath" - "strings" "syscall" - "text/template" "github.com/tinkerbell/cluster-api-provider/playground/cmd" - rufio "github.com/tinkerbell/rufio/api/v1alpha1" - "github.com/tinkerbell/tink/api/v1alpha1" - "gopkg.in/yaml.v3" - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -const ( - controlPlaneNodeRole nodeRole = "control-plane" - workerNodeRole nodeRole = "worker" - captRoleLabel captLabel = "capt-node-role" - clusterName = "playground" -) - -type captLabel string - -type nodeRole string - -type ymls []yml - -type yml struct { - data []byte - name string -} - -type cluster struct { - hardwareCount int - controlPlaneNodesCount int - workerNodesCount int - kubernetesVersion string - namespace string - outputDir string - kubeconfig string - tinkerbellStackVer string - sshAuthorizedKeyFile string - data []data -} - -type data struct { - Hostname string - Namespace string - Mac net.HardwareAddr - Nameservers []string - IP netip.Addr - Netmask net.IPMask - Gateway netip.Addr - Disk string - BMCHostname string - BMCIPPort netip.AddrPort - BMCUsername string - BMCPassword string - labels map[string]string -} - func main() { exitCode := 0 @@ -88,811 +25,4 @@ func main() { fmt.Fprintln(os.Stderr, err) exitCode = 127 } - return - - fs := flag.NewFlagSet("capt-playground", flag.ExitOnError) - pwd, err := os.Getwd() - if err != nil { - pwd = "./" - } - c := cluster{ - kubeconfig: filepath.Join(pwd, "output/kind.kubeconfig"), - } - fs.IntVar(&c.hardwareCount, "hardware-count", 4, "number of hardware to create") - fs.IntVar(&c.controlPlaneNodesCount, "control-plane-nodes-count", 1, "number of control 
plane nodes to create") - fs.IntVar(&c.workerNodesCount, "worker-nodes-count", 2, "number of worker nodes to create") - fs.StringVar(&c.namespace, "namespace", "tink-system", "namespace for all resources") - fs.StringVar(&c.kubernetesVersion, "kubernetes-version", "v1.23.5", "kubernetes version to install") - fs.StringVar(&c.outputDir, "output-dir", "output", "directory to all produced artifacts (yamls, kubeconfig, etc)") - fs.StringVar(&c.tinkerbellStackVer, "tinkerbell-stack-version", "0.4.2", "tinkerbell stack version to install") - fs.StringVar(&c.sshAuthorizedKeyFile, "ssh-authorized-key-file", "", "ssh authorized key file to add to nodes") - fs.Parse(os.Args[1:]) - - // We need the docker network created first so that other containers and VMs can connect to it. - log.Println("create kind cluster") - if err := c.createKindCluster(clusterName); err != nil { - log.Fatalf("error creating kind cluster: %s", err) - } - - // This runs before creating the data slice so that we can get the IP of the Virtual BMC container. - log.Println("Start Virtual BMC") - vbmcIP, err := startVirtualBMC("kind") - if err != nil { - log.Fatalf("error starting Virtual BMC: %s", err) - } - - // get the gateway of the kind network - gateway, err := getGateway("kind") - if err != nil { - log.Fatalf("error getting gateway: %s", err) - } - - subnet, err := getSubnet("kind") - if err != nil { - log.Fatalf("error getting subnet: %s", err) - } - - // Use the vbmcIP in order to determine the subnet for the KinD network. - // This is used to create the IP addresses for the VMs, Tinkerbell stack LB IP, and the KubeAPI server VIP. 
- base := fmt.Sprintf("%v.%v.100", vbmcIP.As4()[0], vbmcIP.As4()[1]) // x.x.100 - controlPlaneVIP := fmt.Sprintf("%v.%d", base, 100) // x.x.100.100 - tinkerbellVIP := fmt.Sprintf("%v.%d", base, 101) // x.x.100.101 - - c.data = make([]data, c.hardwareCount) - curControlPlaneNodesCount := 0 - curWorkerNodesCount := 0 - for i := 0; i < c.hardwareCount; i++ { - num := i + 1 - d := data{ - Hostname: fmt.Sprintf("node%v", num), - Namespace: c.namespace, - Mac: net.HardwareAddr{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, - Nameservers: []string{"8.8.8.8", "1.1.1.1"}, - IP: netip.MustParseAddr(fmt.Sprintf("%v.%d", base, num)), - Netmask: subnet, - Gateway: gateway, - Disk: "/dev/vda", - BMCHostname: vbmcIP.String(), - BMCIPPort: netip.MustParseAddrPort(fmt.Sprintf("0.0.0.0:623%v", num)), - BMCUsername: "admin", - BMCPassword: "password", - labels: map[string]string{}, - } - if m, err := GenerateRandMAC(); err == nil { - d.Mac = m - } - - if curControlPlaneNodesCount < c.controlPlaneNodesCount { - d.labels[captRoleLabel.String()] = controlPlaneNodeRole.String() - curControlPlaneNodesCount++ - } else if curWorkerNodesCount < c.workerNodesCount { - d.labels[captRoleLabel.String()] = workerNodeRole.String() - curWorkerNodesCount++ - } - c.data[i] = d - } - - log.Println("deploy Tinkerbell stack") - if err := c.deployTinkerbellStack(tinkerbellVIP); err != nil { - log.Fatalf("error deploying Tinkerbell stack: %s", err) - } - - log.Println("creating Tinkerbell Custom Resources") - if err := writeYamls(c.data, c.outputDir); err != nil { - log.Fatalf("error writing yamls: %s", err) - } - - log.Println("creating clusterctl.yaml") - if err := writeClusterctlYaml("output"); err != nil { - log.Fatalf("error writing clusterctl.yaml: %s", err) - } - - log.Println("running clusterctl init") - if err := c.clusterctlInit(c.outputDir, tinkerbellVIP); err != nil { - log.Fatalf("error running clusterctl init: %s", err) - } - - log.Println("running clusterctl generate cluster") - podCIDR := 
fmt.Sprintf("%v.100.0.0/16", vbmcIP.As4()[0]) // x.100.0.0/16 (172.25.0.0/16) - if err := c.clusterctlGenerateClusterYaml(c.outputDir, clusterName, c.namespace, c.controlPlaneNodesCount, c.workerNodesCount, c.kubernetesVersion, controlPlaneVIP, podCIDR); err != nil { - log.Fatalf("error running clusterctl generate cluster: %s", err) - } - if err := c.kustomizeClusterYaml(c.outputDir); err != nil { - log.Fatalf("error running kustomize: %s", err) - } - - log.Println("getting KinD bridge") - bridge, err := getKinDBridge("kind") - if err != nil { - log.Fatalf("error getting KinD bridge: %s", err) - } - log.Println("creating VMs") - if err := createVMs(c.data, bridge); err != nil { - log.Fatalf("error creating vms: %s\n", err) - } - - log.Println("Register and start Virtual BMCs for all nodes") - if err := registerAndStartVirtualBMCs(c.data); err != nil { - log.Fatalf("error registering and starting Virtual BMCs: %s", err) - } - - log.Println("update Rufio CRDs") - if err := c.updateRufioCRDs(); err != nil { - log.Fatalf("error updating Rufio CRDs: %s", err) - } - - log.Println("apply all Tinkerbell manifests") - if err := c.applyAllTinkerbellManifests(); err != nil { - log.Fatalf("error applying all Tinkerbell manifests: %s", err) - } - -} - -func (c cluster) applyAllTinkerbellManifests() error { - /* - kubectl apply -f output/apply/ - */ - cmd := "kubectl" - args := []string{"apply", "-f", filepath.Join(c.outputDir, "apply")} - e := exec.CommandContext(context.Background(), cmd, args...) 
- e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error applying all Tinkerbell manifests: %s: out: %v", err, string(out)) - } - - return nil -} - -func (c cluster) updateRufioCRDs() error { - /* - kubectl delete crd machines.bmc.tinkerbell.org tasks.bmc.tinkerbell.org - kubectl apply -f https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_machines.yaml https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_tasks.yaml - */ - cmd := "kubectl" - args := []string{"delete", "crd", "machines.bmc.tinkerbell.org", "tasks.bmc.tinkerbell.org"} - e := exec.CommandContext(context.Background(), cmd, args...) - e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error deleting Rufio CRDs: %s: out: %v", err, string(out)) - } - - args = []string{ - "apply", - "-f", "https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_machines.yaml", - "-f", "https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_tasks.yaml", - } - e = exec.CommandContext(context.Background(), cmd, args...) - e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err = e.CombinedOutput() - if err != nil { - return fmt.Errorf("error applying Rufio CRDs: %s: out: %v", err, string(out)) - } - - return nil -} - -func getSubnet(dockerNet string) (net.IPMask, error) { - /* - docker network inspect kind -f '{{range .IPAM.Config}}{{.Subnet}},{{end}}' - result: 172.20.0.0/16,fc00:f853:ccd:e793::/64, - */ - cmd := "docker" - args := []string{"network", "inspect", dockerNet, "-f", "'{{range .IPAM.Config}}{{.Subnet}},{{end}}'"} - e := exec.CommandContext(context.Background(), cmd, args...) 
- out, err := e.CombinedOutput() - if err != nil { - return nil, fmt.Errorf("error getting subnet: %s: out: %v", err, string(out)) - } - - o := strings.Trim(strings.Trim(string(out), "\n"), "'") - subnets := strings.Split(o, ",") - for _, s := range subnets { - _, ipnet, err := net.ParseCIDR(s) - if err == nil { - if ipnet.IP.To4() != nil { - return ipnet.Mask, nil - } - } - } - - return nil, fmt.Errorf("unable to determine docker network subnet mask, err from command: %s: stdout: %v", err, string(out)) -} - -func getGateway(dockerNet string) (netip.Addr, error) { - /* - docker network inspect kind -f '{{range .IPAM.Config}}{{.Gateway}},{{end}}' - result: 172.20.0.1, - */ - cmd := "docker" - args := []string{"network", "inspect", dockerNet, "-f", "'{{range .IPAM.Config}}{{.Gateway}},{{end}}'"} - e := exec.CommandContext(context.Background(), cmd, args...) - out, err := e.CombinedOutput() - if err != nil { - return netip.Addr{}, fmt.Errorf("error getting gateway: %s: out: %v", err, string(out)) - } - - o := strings.Trim(strings.Trim(string(out), "\n"), "'") - subnets := strings.Split(o, ",") - for _, s := range subnets { - ip, err := netip.ParseAddr(s) - if err == nil && ip.Is4() { - return ip, nil - } - } - - return netip.Addr{}, fmt.Errorf("unable to determine docker network gateway, err from command: %s: stdout: %v", err, string(out)) -} - -func registerAndStartVirtualBMCs(ds []data) error { - /* - for i in {1..4}; do echo $i; docker exec virtualbmc vbmc add --username admin --password password --port "623$i" --no-daemon "node$i"; done - for i in {1..4}; do echo $i; docker exec virtualbmc vbmc start "node$i"; done - */ - cmd := "docker" - for _, d := range ds { - d := d - args := []string{ - "exec", "virtualbmc", - "vbmc", "add", - "--username", d.BMCUsername, - "--password", d.BMCPassword, - "--port", fmt.Sprintf("%v", d.BMCIPPort.Port()), - "--no-daemon", d.Hostname, - } - e := exec.CommandContext(context.Background(), cmd, args...) 
- out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error adding virtualbmc: %+v: error: %s: out: %v", d, err, string(out)) - } - - args = []string{ - "exec", "virtualbmc", - "vbmc", "start", - d.Hostname, - } - e = exec.CommandContext(context.Background(), cmd, args...) - out, err = e.CombinedOutput() - if err != nil { - return fmt.Errorf("error starting virtualbmc: %+v: error: %s: out: %v", d, err, string(out)) - } - } - - return nil -} - -func startVirtualBMC(dockerNet string) (netip.Addr, error) { - /* - docker run -d --rm --network kind -v /var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro -v /var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock --name virtualbmc capt-playground:v2 - */ - cmd := "docker" - args := []string{ - "run", "-d", "--rm", - "--network", dockerNet, - "-v", "/var/run/libvirt/libvirt-sock-ro:/var/run/libvirt/libvirt-sock-ro", - "-v", "/var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock", - "--name", "virtualbmc", - "capt-playground:v2", - } - e := exec.CommandContext(context.Background(), cmd, args...) - out, err := e.CombinedOutput() - if err != nil { - return netip.Addr{}, fmt.Errorf("error starting Virtual BMC: %s: out: %v", err, string(out)) - } - - // get the IP of the container - args = []string{ - "inspect", "-f", "'{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}'", "virtualbmc", - } - e = exec.CommandContext(context.Background(), cmd, args...) 
- out, err = e.CombinedOutput() - if err != nil { - return netip.Addr{}, fmt.Errorf("error getting Virtual BMC IP: %s: out: %v", err, string(out)) - } - - o := strings.Trim(strings.Trim(string(out), "\n"), "'") - ip, err := netip.ParseAddr(o) - if err != nil { - return netip.Addr{}, fmt.Errorf("error parsing Virtual BMC IP: %s: out: %v", err, string(out)) - } - - return ip, nil -} - -func (c cluster) createKindCluster(name string) error { - /* - kind create cluster --name playground --kubeconfig output/kind.kubeconfig - */ - cmd := "kind" - args := []string{"create", "cluster", "--name", name, "--kubeconfig", filepath.Join(c.outputDir, "kind.kubeconfig")} - e := exec.CommandContext(context.Background(), cmd, args...) - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error creating kind cluster: %s: out: %v", err, string(out)) - } - - return nil -} - -func (c cluster) deployTinkerbellStack(tinkVIP string) error { - /* - trusted_proxies=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}') - LB_IP=x.x.x.x - helm install tink-stack oci://ghcr.io/tinkerbell/charts/stack --version "$STACK_CHART_VERSION" --create-namespace --namespace tink-system --wait --set "smee.trustedProxies={${trusted_proxies}}" --set "hegel.trustedProxies={${trusted_proxies}}" --set "stack.loadBalancerIP=$LB_IP" --set "smee.publicIP=$LB_IP" - */ - var trustedProxies string - for { - cmd := "kubectl" - args := []string{"get", "nodes", "-o", "jsonpath='{.items[*].spec.podCIDR}'"} - e := exec.CommandContext(context.Background(), cmd, args...) 
- e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error getting trusted proxies: %s: out: %v", err, string(out)) - } - // strip quotes - trustedProxies = strings.Trim(string(out), "'") - v, _, _ := net.ParseCIDR(trustedProxies) - if v != nil { - break - } - } - - cmd := "helm" - args := []string{ - "install", "tink-stack", "oci://ghcr.io/tinkerbell/charts/stack", - "--version", c.tinkerbellStackVer, - "--create-namespace", "--namespace", c.namespace, - "--wait", - "--set", fmt.Sprintf("smee.trustedProxies={%s}", trustedProxies), - "--set", fmt.Sprintf("hegel.trustedProxies={%s}", trustedProxies), - "--set", fmt.Sprintf("stack.loadBalancerIP=%s", tinkVIP), - "--set", fmt.Sprintf("smee.publicIP=%s", tinkVIP), - "--set", "rufio.image=quay.io/tinkerbell/rufio:latest", - } - e := exec.CommandContext(context.Background(), cmd, args...) - e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error deploying Tinkerbell stack: %s: out: %v", err, string(out)) - } - - return nil -} - -func (c cluster) clusterctlGenerateClusterYaml(outputDir string, clusterName string, namespace string, numCP int, numWorker int, k8sVer string, cpVIP string, podCIDR string) error { - /* - CONTROL_PLANE_VIP=172.18.18.17 POD_CIDR=172.25.0.0/16 clusterctl generate cluster playground --config outputDir/clusterctl.yaml --kubernetes-version v1.23.5 --control-plane-machine-count=1 --worker-machine-count=2 --target-namespace=tink-system --write-to playground.yaml - */ - cmd := "clusterctl" - args := []string{ - "generate", "cluster", clusterName, - "--config", filepath.Join(outputDir, "clusterctl.yaml"), - "--kubernetes-version", fmt.Sprintf("%v", k8sVer), - fmt.Sprintf("--control-plane-machine-count=%v", numCP), - fmt.Sprintf("--worker-machine-count=%v", numWorker), - fmt.Sprintf("--target-namespace=%v", namespace), - "--write-to", 
filepath.Join(outputDir, fmt.Sprintf("%v.yaml", clusterName)), - } - e := exec.CommandContext(context.Background(), cmd, args...) - e.Env = []string{ - fmt.Sprintf("CONTROL_PLANE_VIP=%s", cpVIP), - fmt.Sprintf("POD_CIDR=%v", podCIDR), - fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig), - "XDG_CONFIG_HOME=/home/tink/xdg", - "XDG_CONFIG_DIRS=/home/tink/xdg", - "XDG_STATE_HOME=/home/tink/xdg", - "XDG_CACHE_HOME=/home/tink/xdg", - "XDG_RUNTIME_DIR=/home/tink/xdg", - "XDG_DATA_HOME=/home/tink/xdg", - "XDG_DATA_DIRS=/home/tink/xdg", - } - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error running clusterctl generate cluster: %s: out: %v", err, string(out)) - } - - return nil -} - -var kustomizeYaml = `apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: tink-system -resources: - - playground.yaml -patches: - - target: - group: infrastructure.cluster.x-k8s.io - kind: TinkerbellMachineTemplate - name: ".*control-plane.*" - version: v1beta1 - patch: |- - - op: add - path: /spec/template/spec - value: - hardwareAffinity: - required: - - labelSelector: - matchLabels: - capt-node-role: control-plane - - target: - group: infrastructure.cluster.x-k8s.io - kind: TinkerbellMachineTemplate - name: ".*worker.*" - version: v1beta1 - patch: |- - - op: add - path: /spec/template/spec - value: - hardwareAffinity: - required: - - labelSelector: - matchLabels: - capt-node-role: worker -{{- if .SSHAuthorizedKey }} - - target: - group: bootstrap.cluster.x-k8s.io - kind: KubeadmConfigTemplate - name: "playground-.*" - version: v1beta1 - patch: |- - - op: add - path: /spec/template/spec/users - value: - - name: tink - sudo: ALL=(ALL) NOPASSWD:ALL - sshAuthorizedKeys: - - {{ .SSHAuthorizedKey }} - - target: - group: controlplane.cluster.x-k8s.io - kind: KubeadmControlPlane - name: "playground-.*" - version: v1beta1 - patch: |- - - op: add - path: /spec/kubeadmConfigSpec/users - value: - - name: tink - sudo: ALL=(ALL) NOPASSWD:ALL - sshAuthorizedKeys: 
- - {{ .SSHAuthorizedKey }} -{{ end -}} -` - -func generateTemplate(d any, tmpl string) (string, error) { - t := template.New("template") - t, err := t.Parse(tmpl) - if err != nil { - return "", err - } - buffer := new(bytes.Buffer) - if err := t.Execute(buffer, d); err != nil { - return "", err - } - - return buffer.String(), nil -} - -func (c cluster) kustomizeClusterYaml(outputDir string) error { - /* - kubectl kustomize -o output/playground.yaml - */ - // get authorized key. ignore error if file doesn't exist as authorizedKey will be "" and the template will be unchanged - authorizedKey, _ := os.ReadFile(c.sshAuthorizedKeyFile) - authorizedKey = []byte(strings.TrimSuffix(string(authorizedKey), "\n")) - patch, err := generateTemplate(struct{ SSHAuthorizedKey string }{string(authorizedKey)}, kustomizeYaml) - if err != nil { - return err - } - - // write kustomization.yaml to output dir - if err := os.WriteFile(filepath.Join(outputDir, "kustomization.yaml"), []byte(patch), 0644); err != nil { - return err - } - cmd := "kubectl" - args := []string{"kustomize", outputDir, "-o", filepath.Join(outputDir, "playground.yaml")} - e := exec.CommandContext(context.Background(), cmd, args...) - e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)} - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error running kubectl kustomize: %s: out: %v", err, string(out)) - } - - return nil -} - -func (c cluster) clusterctlInit(outputDir string, tinkVIP string) error { - /* - TINKERBELL_IP=172.18.18.18 clusterctl --config output/clusterctl.yaml init --infrastructure tinkerbell - */ - cmd := "clusterctl" - args := []string{"init", "--config", filepath.Join(outputDir, "clusterctl.yaml"), "--infrastructure", "tinkerbell"} - e := exec.CommandContext(context.Background(), cmd, args...) 
- e.Env = []string{ - fmt.Sprintf("TINKERBELL_IP=%s", tinkVIP), - fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig), - "XDG_CONFIG_HOME=/home/tink/xdg", - "XDG_CONFIG_DIRS=/home/tink/xdg", - "XDG_STATE_HOME=/home/tink/xdg", - "XDG_CACHE_HOME=/home/tink/xdg", - "XDG_RUNTIME_DIR=/home/tink/xdg", - "XDG_DATA_HOME=/home/tink/xdg", - "XDG_DATA_DIRS=/home/tink/xdg", - } - out, err := e.CombinedOutput() - if err != nil { - return fmt.Errorf("error running clusterctl init: %s: out: %v", err, string(out)) - } - - return nil -} - -func writeClusterctlYaml(outputDir string) error { - /* - mkdir -p ~/.cluster-api - cat >> ~/.cluster-api/clusterctl.yaml <