Skip to content

Commit

Permalink
Add CAPT version and image flags; add command logging and auditing
Browse files Browse the repository at this point in the history
Being able to specify the image version is helpful
when you want to deploy the CAPT container
with a non-release tag.

For troubleshooting, the auditing and logging will be
very helpful.

Signed-off-by: Jacob Weinstock <[email protected]>
  • Loading branch information
jacobweinstock committed Jan 19, 2024
1 parent 92b69db commit adf4734
Show file tree
Hide file tree
Showing 10 changed files with 256 additions and 81 deletions.
99 changes: 64 additions & 35 deletions playground/cmd/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"crypto/rand"
"flag"
"fmt"
"io"
"log"
"net"
"net/netip"
Expand Down Expand Up @@ -46,6 +47,10 @@ type Create struct {
TinkerbellStackVersion string
// SSHPublicKeyFile is the file location of the SSH public key that will be added to all control plane and worker nodes in the workload cluster
SSHPublicKeyFile string
// CAPTVersion is the version of CAPT that will be used to create the workload cluster
CAPTVersion string
// CAPTImageTag is the image tag of CAPT that will be used to create the workload cluster
CAPTImageTag string
// nodeData holds data for each node that will be created
nodeData []tinkerbell.NodeData
rootConfig *rootConfig
Expand Down Expand Up @@ -81,6 +86,8 @@ func (c *Create) registerFlags(fs *flag.FlagSet) {
fs.StringVar(&c.Namespace, "namespace", "capt-playground", "namespace to use for all Objects created")
fs.StringVar(&c.TinkerbellStackVersion, "tinkerbell-stack-version", "0.4.2", "version of the Tinkerbell stack that will be deployed to the management cluster")
fs.StringVar(&c.SSHPublicKeyFile, "ssh-public-key-file", "", "file location of the SSH public key that will be added to all control plane and worker nodes in the workload cluster")
fs.StringVar(&c.CAPTVersion, "capt-version", "0.4.0", "version of CAPT that will be installed in the management cluster")
fs.StringVar(&c.CAPTImageTag, "capt-image-tag", "0.4.0", "container image tag of CAPT manager that will be deployed to the management cluster")
}

func (c *Create) exec(ctx context.Context) error {
Expand Down Expand Up @@ -110,9 +117,23 @@ func (c *Create) exec(ctx context.Context) error {
if err := os.WriteFile(c.rootConfig.StateFile, d, 0644); err != nil {
return fmt.Errorf("failed to write state file: %w", err)
}
p := filepath.Join(c.OutputDir, "apply")
if err := os.MkdirAll(p, 0755); err != nil && !os.IsExist(err) {
return fmt.Errorf("error creating output dir: %w", err)
}
auditWriter, err := os.OpenFile(filepath.Join(c.OutputDir, "audit.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0755)
if err != nil {
return fmt.Errorf("error opening audit log: %w", err)
}
defer auditWriter.Close()
outputWriter, err := os.OpenFile(filepath.Join(c.OutputDir, "output.log"), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0755)
if err != nil {
return fmt.Errorf("error opening output log: %w", err)
}
defer outputWriter.Close()
// We need the docker network created first so that other containers and VMs can connect to it.
log.Println("create kind cluster")
if err := kind.CreateCluster(ctx, kind.Args{Name: "playground", Kubeconfig: c.kubeconfig}); err != nil {
if err := kind.CreateCluster(ctx, kind.Args{Name: "playground", Kubeconfig: c.kubeconfig, AuditWriter: auditWriter}); err != nil {
return fmt.Errorf("error creating kind cluster: %w", err)
}

Expand All @@ -130,12 +151,13 @@ func (c *Create) exec(ctx context.Context) error {
}

// get the gateway of the kind network
gateway, err := docker.IPv4GatewayFrom("kind")
do := docker.Opts{AuditWriter: auditWriter}
gateway, err := do.IPv4GatewayFrom("kind")
if err != nil {
return fmt.Errorf("error getting gateway: %s", err)
}

subnet, err := docker.IPv4SubnetFrom("kind")
subnet, err := do.IPv4SubnetFrom("kind")
if err != nil {
return fmt.Errorf("error getting subnet: %s", err)
}
Expand All @@ -146,23 +168,24 @@ func (c *Create) exec(ctx context.Context) error {
log.Println("deploy Tinkerbell stack")
base := fmt.Sprintf("%v.%v.100", vbmcIP.As4()[0], vbmcIP.As4()[1]) // x.x.100
tinkerbellVIP := fmt.Sprintf("%v.%d", base, 101) // x.x.100.101
if err := c.deployTinkerbellStack(tinkerbellVIP); err != nil {
if err := c.deployTinkerbellStack(tinkerbellVIP, auditWriter, outputWriter); err != nil {
return fmt.Errorf("error deploying Tinkerbell stack: %s", err)
}

log.Println("creating Tinkerbell Custom Resources")
if err := writeYamls(c.nodeData, c.OutputDir, c.Namespace); err != nil {
if err := writeYamls(c.nodeData, p, c.Namespace); err != nil {
return fmt.Errorf("error writing yamls: %s", err)
}

log.Println("create VMs")
bridge, err := docker.LinuxBridgeFrom("kind")
bridge, err := do.LinuxBridgeFrom("kind")
if err != nil {
return fmt.Errorf("error during VM creation: %w", err)
}
for _, d := range c.nodeData {
d := d
if err := libvirt.CreateVM(d.Hostname, bridge, d.MACAddress); err != nil {
lo := libvirt.Opts{AuditWriter: auditWriter}
if err := lo.CreateVM(d.Hostname, bridge, d.MACAddress); err != nil {
return fmt.Errorf("error during VM creation: %w", err)
}
}
Expand All @@ -176,6 +199,7 @@ func (c *Create) exec(ctx context.Context) error {
Port: fmt.Sprintf("%d", d.BMCIP.Port()),
}
vbmc.BMCInfo = append(vbmc.BMCInfo, n)
vbmc.AuditWriter = auditWriter
}

log.Println("starting Virtual BMCs")
Expand All @@ -189,8 +213,10 @@ func (c *Create) exec(ctx context.Context) error {
log.Println("update Rufio CRDs")
args := kubectl.Args{
Cmd: "delete",
AdditionalPrefixArgs: []string{"crd", "machines.bmc.tinkerbell.org", "tasks.bmc.tinkerbell.org"},
AdditionalSuffixArgs: []string{"crd", "machines.bmc.tinkerbell.org", "tasks.bmc.tinkerbell.org"},
Kubeconfig: c.kubeconfig,
CacheDir: c.OutputDir,
AuditWriter: auditWriter,
}
if _, err := kubectl.RunCommand(context.Background(), args); err != nil {
return fmt.Errorf("error deleting Rufio CRDs: %w", err)
Expand All @@ -199,32 +225,38 @@ func (c *Create) exec(ctx context.Context) error {
"https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_machines.yaml",
"https://raw.githubusercontent.com/tinkerbell/rufio/main/config/crd/bases/bmc.tinkerbell.org_tasks.yaml",
}
if err := kubectl.ApplyFiles(context.Background(), c.kubeconfig, rufioCRDs); err != nil {
ko := kubectl.Opts{
Kubeconfig: c.kubeconfig,
CacheDir: c.OutputDir,
AuditWriter: auditWriter,
}
if err := ko.ApplyFiles(context.Background(), rufioCRDs); err != nil {
return fmt.Errorf("update Rufio CRDs: %w", err)
}

log.Println("apply all Tinkerbell manifests")
if err := kubectl.ApplyFiles(context.Background(), c.kubeconfig, []string{filepath.Join(c.OutputDir, "apply") + "/"}); err != nil {
if err := ko.ApplyFiles(context.Background(), []string{filepath.Join(c.OutputDir, "apply") + "/"}); err != nil {
return fmt.Errorf("error applying Tinkerbell manifests: %w", err)
}

log.Println("creating clusterctl.yaml")
if err := capi.ClusterctlYamlToDisk(c.OutputDir); err != nil {
if err := capi.ClusterctlYamlToDisk(c.OutputDir, c.CAPTVersion, c.CAPTImageTag); err != nil {
return fmt.Errorf("error creating clusterctl.yaml: %w", err)
}

capiOpts := capi.Opts{AuditWriter: auditWriter}
log.Println("running clusterctl init")
if capi.ClusterctlInit(c.OutputDir, c.kubeconfig, tinkerbellVIP); err != nil {
if _, err := capi.ClusterctlInit(c.OutputDir, c.kubeconfig, tinkerbellVIP, capiOpts); err != nil {
return fmt.Errorf("error running clusterctl init: %w", err)
}

log.Println("running clusterctl generate cluster")
podCIDR := fmt.Sprintf("%v.100.0.0/16", vbmcIP.As4()[0]) // x.100.0.0/16 (172.25.0.0/16)
controlPlaneVIP := fmt.Sprintf("%v.%d", base, 100) // x.x.100.100
if err := capi.ClusterYamlToDisk(c.OutputDir, c.ClusterName, c.Namespace, strconv.Itoa(c.ControlPlaneNodes), strconv.Itoa(c.WorkerNodes), c.KubernetesVersion, controlPlaneVIP, podCIDR, c.kubeconfig); err != nil {
controlPlaneVIP := fmt.Sprintf("%v.%d", base, 100) // x.x.100.100
if err := capi.ClusterYamlToDisk(c.OutputDir, c.ClusterName, c.Namespace, strconv.Itoa(c.ControlPlaneNodes), strconv.Itoa(c.WorkerNodes), c.KubernetesVersion, controlPlaneVIP, podCIDR, c.kubeconfig, capiOpts); err != nil {
return fmt.Errorf("error running clusterctl generate cluster: %w", err)
}
if err := kubectl.KustomizeClusterYaml(c.OutputDir, c.ClusterName, c.kubeconfig, c.SSHPublicKeyFile, capi.KustomizeYaml, c.Namespace, string(CAPTRole)); err != nil {
if err := ko.KustomizeClusterYaml(c.OutputDir, c.ClusterName, c.SSHPublicKeyFile, capi.KustomizeYaml, c.Namespace, string(CAPTRole)); err != nil {
return fmt.Errorf("error running kustomize: %w", err)
}

Expand Down Expand Up @@ -282,34 +314,32 @@ func GenerateRandMAC() (net.HardwareAddr, error) {
return buf, nil
}

func (c *Create) deployTinkerbellStack(tinkVIP string) error {
func (c *Create) deployTinkerbellStack(tinkVIP string, auditWriter, outputWriter io.Writer) error {
/*
trusted_proxies=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}')
LB_IP=x.x.x.x
helm install tink-stack oci://ghcr.io/tinkerbell/charts/stack --version "$STACK_CHART_VERSION" --create-namespace --namespace tink-system --wait --set "smee.trustedProxies={${trusted_proxies}}" --set "hegel.trustedProxies={${trusted_proxies}}" --set "stack.loadBalancerIP=$LB_IP" --set "smee.publicIP=$LB_IP"
*/
var trustedProxies []string
timeout := time.NewTimer(time.Minute)
auditOnce := true
LOOP:
for {
select {
case <-timeout.C:
return fmt.Errorf("unable to get node cidrs after 1 minute")
default:
}
/*
cmd := "kubectl"
args := []string{"get", "nodes", "-o", "jsonpath='{.items[*].spec.podCIDR}'"}
e := exec.CommandContext(context.Background(), cmd, args...)
e.Env = []string{fmt.Sprintf("KUBECONFIG=%s", c.kubeconfig)}
out, err := e.CombinedOutput()
if err != nil {
return fmt.Errorf("error getting trusted proxies: %s: out: %v", err, string(out))
}
// strip quotes
trustedProxies = strings.Trim(string(out), "'")
*/
cidrs, err := kubectl.GetNodeCidrs(context.Background(), c.kubeconfig)
ko := kubectl.Opts{
Kubeconfig: c.kubeconfig,
CacheDir: c.OutputDir,
}
if auditOnce {
ko.AuditWriter = auditWriter
auditOnce = false
}

cidrs, err := ko.GetNodeCidrs(context.Background())
if err != nil {
return fmt.Errorf("error getting node cidrs: %w", err)
}
Expand Down Expand Up @@ -340,7 +370,10 @@ LOOP:
"smee.publicIP": tinkVIP,
"rufio.image": "quay.io/tinkerbell/rufio:latest",
},
Kubeconfig: c.kubeconfig,
Kubeconfig: c.kubeconfig,
CacheDir: c.OutputDir,
AuditWriter: auditWriter,
OutputWriter: outputWriter,
}
if err := helm.Install(context.Background(), a); err != nil {
return fmt.Errorf("error deploying Tinkerbell stack: %w", err)
Expand All @@ -350,10 +383,6 @@ LOOP:
}

func writeYamls(ds []tinkerbell.NodeData, outputDir string, namespace string) error {
p := filepath.Join(outputDir, "apply")
if err := os.MkdirAll(p, 0755); err != nil && !os.IsExist(err) {
return err
}
for _, d := range ds {
y := []struct {
name string
Expand All @@ -365,7 +394,7 @@ func writeYamls(ds []tinkerbell.NodeData, outputDir string, namespace string) er
}

for _, yaml := range y {
if err := os.WriteFile(filepath.Join(p, yaml.name), yaml.data, 0644); err != nil {
if err := os.WriteFile(filepath.Join(outputDir, yaml.name), yaml.data, 0644); err != nil {
return err
}
}
Expand Down
34 changes: 26 additions & 8 deletions playground/internal/capi/capi.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,35 +3,46 @@ package capi
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"

"github.com/tinkerbell/cluster-api-provider/playground/internal/exec"
)

const (
binary = "clusterctl"
clusterctlYaml = "clusterctl.yaml"
)

func ClusterctlYamlToDisk(outputDir string) error {
type Opts struct {
AuditWriter io.Writer
}

func ClusterctlYamlToDisk(outputDir string, releaseVer, imageTag string) error {
contents := fmt.Sprintf(`providers:
- name: "tinkerbell"
url: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases/v%v/infrastructure-components.yaml"
type: "InfrastructureProvider"`, "0.4.0")
type: "InfrastructureProvider"
images:
infrastructure-tinkerbell:
tag: %v
`, releaseVer, imageTag)

return os.WriteFile(filepath.Join(outputDir, clusterctlYaml), []byte(contents), 0644)
}

func ClusterctlInit(outputDir, kubeconfig, tinkerbellVIP string) error {
func ClusterctlInit(outputDir, kubeconfig, tinkerbellVIP string, o Opts) (output string, err error) {
/*
TINKERBELL_IP=172.18.18.18 clusterctl --config output/clusterctl.yaml init --infrastructure tinkerbell
*/

args := []string{"init", "--config", filepath.Join(outputDir, clusterctlYaml), "--infrastructure", "tinkerbell"}
args := []string{"init", "--config", filepath.Join(outputDir, clusterctlYaml), "--infrastructure", "tinkerbell", "-v5"}
e := exec.CommandContext(context.Background(), binary, args...)
e.Env = []string{
fmt.Sprintf("TINKERBELL_IP=%s", tinkerbellVIP),
fmt.Sprintf("KUBECONFIG=%s", kubeconfig),
"CLUSTERCTL_DISABLE_VERSIONCHECK=true",
"XDG_CONFIG_HOME=/tmp/xdg",
"XDG_CONFIG_DIRS=/tmp/xdg",
"XDG_STATE_HOME=/tmp/xdg",
Expand All @@ -40,15 +51,18 @@ func ClusterctlInit(outputDir, kubeconfig, tinkerbellVIP string) error {
"XDG_DATA_HOME=/tmp/xdg",
"XDG_DATA_DIRS=/tmp/xdg",
}
if o.AuditWriter != nil {
e.AuditWriter = o.AuditWriter
}
out, err := e.CombinedOutput()
if err != nil {
return fmt.Errorf("error running clusterctl init: %s: out: %v", err, string(out))
return "", fmt.Errorf("error running clusterctl init: %s: out: %v", err, string(out))
}

return nil
return string(out), nil
}

func ClusterYamlToDisk(outputDir, clusterName, namespace, cpNodeNum, workerNodeNum, k8sVer, cpVIP, podCIDR, kubeconfig string) error {
func ClusterYamlToDisk(outputDir, clusterName, namespace, cpNodeNum, workerNodeNum, k8sVer, cpVIP, podCIDR, kubeconfig string, o Opts) error {
/*
CONTROL_PLANE_VIP=172.18.18.17 POD_CIDR=172.25.0.0/16 clusterctl generate cluster playground --config outputDir/clusterctl.yaml --kubernetes-version v1.23.5 --control-plane-machine-count=1 --worker-machine-count=2 --target-namespace=tink-system --write-to playground.yaml
*/
Expand All @@ -66,6 +80,7 @@ func ClusterYamlToDisk(outputDir, clusterName, namespace, cpNodeNum, workerNodeN
fmt.Sprintf("CONTROL_PLANE_VIP=%s", cpVIP),
fmt.Sprintf("POD_CIDR=%v", podCIDR),
fmt.Sprintf("KUBECONFIG=%s", kubeconfig),
"CLUSTERCTL_DISABLE_VERSIONCHECK=true",
"XDG_CONFIG_HOME=/tmp/xdg",
"XDG_CONFIG_DIRS=/tmp/xdg",
"XDG_STATE_HOME=/tmp/xdg",
Expand All @@ -74,6 +89,9 @@ func ClusterYamlToDisk(outputDir, clusterName, namespace, cpNodeNum, workerNodeN
"XDG_DATA_HOME=/tmp/xdg",
"XDG_DATA_DIRS=/tmp/xdg",
}
if o.AuditWriter != nil {
e.AuditWriter = o.AuditWriter
}
out, err := e.CombinedOutput()
if err != nil {
return fmt.Errorf("error running clusterctl generate cluster: %s: out: %v", err, string(out))
Expand Down
Loading

0 comments on commit adf4734

Please sign in to comment.