diff --git a/api/v1beta1/elfmachine_types.go b/api/v1beta1/elfmachine_types.go
index d24df814..bc1db041 100644
--- a/api/v1beta1/elfmachine_types.go
+++ b/api/v1beta1/elfmachine_types.go
@@ -228,6 +228,21 @@ func (m *ElfMachine) SetVMDisconnectionTimestamp(timestamp *metav1.Time) {
 	}
 }
 
+// GetNetworkDevicesRequiringIP returns the NetworkDeviceSpecs that require a DHCP or static IP.
+func (m *ElfMachine) GetNetworkDevicesRequiringIP() []NetworkDeviceSpec {
+	networkDevices := []NetworkDeviceSpec{}
+
+	for index := range m.Spec.Network.Devices {
+		if m.Spec.Network.Devices[index].NetworkType == NetworkTypeNone {
+			continue
+		}
+
+		networkDevices = append(networkDevices, m.Spec.Network.Devices[index])
+	}
+
+	return networkDevices
+}
+
 func (m *ElfMachine) GetVMDisconnectionTimestamp() *metav1.Time {
 	if m.Annotations == nil {
 		return nil
diff --git a/controllers/elfmachine_controller.go b/controllers/elfmachine_controller.go
index 56afecf8..ff683f81 100644
--- a/controllers/elfmachine_controller.go
+++ b/controllers/elfmachine_controller.go
@@ -958,8 +958,10 @@ func (r *ElfMachineReconciler) reconcileNode(ctx *context.MachineContext, vm *mo
 	return true, nil
 }
 
-// If the VM is powered on then issue requeues until all of the VM's
-// networks have IP addresses.
+// Ensure all the VM's NICs get IP addresses, otherwise requeue.
+//
+// In a scenario with many virtual machines, SMTX OS can be slow to synchronize VM information via vmtools.
+// So if the Tower API returns an empty IP address for the VM's first NIC, try to get the IP address from the corresponding K8s Node.
 func (r *ElfMachineReconciler) reconcileNetwork(ctx *context.MachineContext, vm *models.VM) (ret bool, reterr error) {
 	defer func() {
 		if reterr != nil {
@@ -970,38 +972,62 @@ func (r *ElfMachineReconciler) reconcileNetwork(ctx *context.MachineContext, vm
 		}
 	}()
 
+	ctx.ElfMachine.Status.Network = []infrav1.NetworkStatus{}
+	ctx.ElfMachine.Status.Addresses = []clusterv1.MachineAddress{}
+	// A map of IP address to MachineAddress.
+	ipToMachineAddressMap := make(map[string]clusterv1.MachineAddress)
+
 	nics, err := ctx.VMService.GetVMNics(*vm.ID)
 	if err != nil {
 		return false, err
 	}
 
-	networkStatuses := make([]infrav1.NetworkStatus, 0, len(nics))
 	for i := 0; i < len(nics); i++ {
 		nic := nics[i]
-		if service.GetTowerString(nic.IPAddress) == "" {
-			continue
-		}
+		ip := service.GetTowerString(nic.IPAddress)
 
-		networkStatuses = append(networkStatuses, infrav1.NetworkStatus{
-			IPAddrs: []string{service.GetTowerString(nic.IPAddress)},
+		// Add to Status.Network even if the IP is empty.
+		ctx.ElfMachine.Status.Network = append(ctx.ElfMachine.Status.Network, infrav1.NetworkStatus{
+			IPAddrs: []string{ip},
 			MACAddr: service.GetTowerString(nic.MacAddress),
 		})
-	}
 
-	ctx.ElfMachine.Status.Network = networkStatuses
-	if len(networkStatuses) < len(ctx.ElfMachine.Spec.Network.Devices) {
-		return false, nil
-	}
+		if ip == "" {
+			continue
+		}
 
-	ipAddrs := make([]clusterv1.MachineAddress, 0, len(ctx.ElfMachine.Status.Network))
-	for _, netStatus := range ctx.ElfMachine.Status.Network {
-		ipAddrs = append(ipAddrs, clusterv1.MachineAddress{
+		ipToMachineAddressMap[ip] = clusterv1.MachineAddress{
 			Type:    clusterv1.MachineInternalIP,
-			Address: netStatus.IPAddrs[0],
-		})
+			Address: ip,
+		}
+	}
+
+	networkDevicesRequiringIP := ctx.ElfMachine.GetNetworkDevicesRequiringIP()
+
+	if len(ipToMachineAddressMap) < len(networkDevicesRequiringIP) {
+		// Try to get the VM NIC IP address from the K8s Node.
+		nodeIP, err := r.getK8sNodeIP(ctx, ctx.ElfMachine.Name)
+		if err == nil && nodeIP != "" {
+			ipToMachineAddressMap[nodeIP] = clusterv1.MachineAddress{
+				Address: nodeIP,
+				Type:    clusterv1.MachineInternalIP,
+			}
+
+			// If not all NICs have an IP yet, return false and wait for the next requeue.
+			if len(ipToMachineAddressMap) < len(networkDevicesRequiringIP) {
+				return false, nil
+			}
+		} else {
+			if err != nil {
+				ctx.Logger.Error(err, "failed to get VM NIC IP address from the K8s Node", "Node", ctx.ElfMachine.Name)
+			}
+			return false, nil
+		}
 	}
 
-	ctx.ElfMachine.Status.Addresses = ipAddrs
+	for _, machineAddress := range ipToMachineAddressMap {
+		ctx.ElfMachine.Status.Addresses = append(ctx.ElfMachine.Status.Addresses, machineAddress)
+	}
 
 	return true, nil
 }
@@ -1106,3 +1132,37 @@ func (r *ElfMachineReconciler) deleteNode(ctx *context.MachineContext, nodeName
 
 	return nil
 }
+
+// getK8sNodeIP gets the default network IP of the K8s Node.
+func (r *ElfMachineReconciler) getK8sNodeIP(ctx *context.MachineContext, nodeName string) (string, error) {
+	// Return early if the control plane is not initialized.
+	if !conditions.IsTrue(ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) {
+		return "", nil
+	}
+
+	kubeClient, err := util.NewKubeClient(ctx, ctx.Client, ctx.Cluster)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to get client for Cluster %s/%s", ctx.Cluster.Namespace, ctx.Cluster.Name)
+	}
+
+	k8sNode, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+	if err != nil && apierrors.IsNotFound(err) {
+		return "", nil
+	}
+
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to get K8s Node %s for Cluster %s/%s", nodeName, ctx.Cluster.Namespace, ctx.Cluster.Name)
+	}
+
+	if len(k8sNode.Status.Addresses) == 0 {
+		return "", nil
+	}
+
+	for _, address := range k8sNode.Status.Addresses {
+		if address.Type == corev1.NodeInternalIP {
+			return address.Address, nil
+		}
+	}
+
+	return "", nil
+}
diff --git a/controllers/elfmachine_controller_test.go b/controllers/elfmachine_controller_test.go
index e410e9cc..67e923f1 100644
--- a/controllers/elfmachine_controller_test.go
+++ b/controllers/elfmachine_controller_test.go
@@ -65,10 +65,12 @@ var _ = Describe("ElfMachineReconciler", func() {
 		elfCluster       *infrav1.ElfCluster
 		cluster          *clusterv1.Cluster
 		elfMachine       *infrav1.ElfMachine
+		k8sNode          *corev1.Node
 		machine          *clusterv1.Machine
 		kcp              *controlplanev1.KubeadmControlPlane
 		md               *clusterv1.MachineDeployment
 		secret           *corev1.Secret
+		kubeConfigSecret *corev1.Secret
 		logBuffer        *bytes.Buffer
 		mockCtrl         *gomock.Controller
 		mockVMService    *mock_services.MockVMService
@@ -78,6 +80,8 @@ var _ = Describe("ElfMachineReconciler", func() {
 	ctx := goctx.Background()
 
 	BeforeEach(func() {
+		var err error
+
 		// set log
 		if err := flag.Set("logtostderr", "false"); err != nil {
 			_ = fmt.Errorf("Error setting logtostderr flag")
@@ -100,6 +104,24 @@ var _ = Describe("ElfMachineReconciler", func() {
 		mockNewVMService = func(_ goctx.Context, _ infrav1.Tower, _ logr.Logger) (service.VMService, error) {
 			return mockVMService, nil
 		}
+
+		k8sNode = &corev1.Node{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:   elfMachine.Name,
+				Labels: map[string]string{},
+			},
+			Status: corev1.NodeStatus{
+				Addresses: []corev1.NodeAddress{
+					{
+						Address: "127.0.0.1",
+						Type:    corev1.NodeInternalIP,
+					},
+				},
+			},
+		}
+
+		kubeConfigSecret, err = helpers.NewKubeConfigSecret(testEnv, cluster.Namespace, cluster.Name)
+		Expect(err).ShouldNot(HaveOccurred())
 	})
 
 	AfterEach(func() {
@@ -317,9 +339,11 @@ var _ = Describe("ElfMachineReconciler", func() {
 			elfMachine.SetVMDisconnectionTimestamp(&now)
 			nic := fake.NewTowerVMNic(0)
 			placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID})
-			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md)
+			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md, kubeConfigSecret)
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 
+			Expect(testEnv.CreateAndWait(ctx, k8sNode)).To(Succeed())
+
 			mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Return(vm, nil)
 			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic}, nil)
 			mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Return(placementGroup, nil)
@@ -1396,8 +1420,14 @@ var _ = Describe("ElfMachineReconciler", func() {
 			elfMachine.Status.VMRef = *vm.LocalID
 			vm.EntityAsyncStatus = nil
 			placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID})
-			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md)
+
+			// before reconciling, create the k8s node for the VM.
+			Expect(testEnv.CreateAndWait(ctx, k8sNode)).To(Succeed())
+
+			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md, kubeConfigSecret)
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
+			// before reconciling, create the kubeconfig secret for the cluster.
+			Expect(helpers.CreateKubeConfigSecret(testEnv, cluster.Namespace, cluster.Name)).To(Succeed())
 
 			mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Return(vm, nil)
 			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
@@ -1425,15 +1455,18 @@ var _ = Describe("ElfMachineReconciler", func() {
 			vm.EntityAsyncStatus = nil
 			elfMachine.Status.VMRef = *vm.LocalID
 			placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID})
-			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md)
+			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md, kubeConfigSecret)
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 
-			mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Times(3).Return(vm, nil)
-			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
-			mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Times(6).Return(placementGroup, nil)
-			mockVMService.EXPECT().UpsertLabel(gomock.Any(), gomock.Any()).Times(9).Return(fake.NewTowerLabel(), nil)
-			mockVMService.EXPECT().AddLabelsToVM(gomock.Any(), gomock.Any()).Times(3)
+			mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Times(11).Return(vm, nil)
+			mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Times(22).Return(placementGroup, nil)
+			mockVMService.EXPECT().UpsertLabel(gomock.Any(), gomock.Any()).Times(33).Return(fake.NewTowerLabel(), nil)
+			mockVMService.EXPECT().AddLabelsToVM(gomock.Any(), gomock.Any()).Times(11)
+			// k8s node IP is empty, VM has no nic info
+			k8sNode.Status.Addresses = nil
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
+			Expect(testEnv.CreateAndWait(ctx, k8sNode)).To(Succeed())
 
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
 			elfMachineKey := capiutil.ObjectKey(elfMachine)
 			result, err := reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
@@ -1444,10 +1477,55 @@ var _ = Describe("ElfMachineReconciler", func() {
 			Expect(result.RequeueAfter).NotTo(BeZero())
 			Expect(err).ShouldNot(HaveOccurred())
 			Expect(logBuffer.String()).To(ContainSubstring("VM network is not ready yet"))
 			elfMachine = &infrav1.ElfMachine{}
 			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
 			expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForNetworkAddressesReason}})
 
-			nic := fake.NewTowerVMNic(0)
-			nic.IPAddress = service.TowerString("")
+			patchHelper, err := patch.NewHelper(k8sNode, testEnv.Client)
+			Expect(err).ShouldNot(HaveOccurred())
+			k8sNode.Status.Addresses = []corev1.NodeAddress{
+				{
+					Address: "test",
+					Type:    corev1.NodeHostName,
+				},
+			}
+			Expect(patchHelper.Patch(ctx, k8sNode)).To(Succeed())
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).NotTo(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(logBuffer.String()).To(ContainSubstring("VM network is not ready yet"))
+			elfMachine = &infrav1.ElfMachine{}
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+			expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForNetworkAddressesReason}})
+
+			k8sNode.Status.Addresses = []corev1.NodeAddress{
+				{
+					Address: "",
+					Type:    corev1.NodeInternalIP,
+				},
+			}
+			Expect(patchHelper.Patch(ctx, k8sNode)).To(Succeed())
+
+			// k8s node IP is empty, VM has no nic info
 			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).NotTo(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(logBuffer.String()).To(ContainSubstring("VM network is not ready yet"))
+			elfMachine = &infrav1.ElfMachine{}
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+			expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForNetworkAddressesReason}})
+
+			// k8s node IP is empty, VM has nic info
+			nic := fake.NewTowerVMNic(0)
+			nic.IPAddress = service.TowerString("127.0.0.1")
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic}, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).To(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+			// k8s node IP is empty, VM nic info has an empty IP
+			nic = fake.NewTowerVMNic(0)
+			nic.IPAddress = service.TowerString("")
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic}, nil)
 			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
 			Expect(result.RequeueAfter).NotTo(BeZero())
 			Expect(err).ShouldNot(HaveOccurred())
@@ -1456,15 +1534,84 @@ var _ = Describe("ElfMachineReconciler", func() {
 			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
 			expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForNetworkAddressesReason}})
 
-			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, errors.New("error"))
+			k8sNode.Status.Addresses = []corev1.NodeAddress{
+				{
+					Address: "127.0.0.1",
+					Type:    corev1.NodeInternalIP,
+				},
+			}
+			Expect(patchHelper.Patch(ctx, k8sNode)).To(Succeed())
+
+			// k8s node has node IP, VM has no nic info
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
 			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
 			Expect(result.RequeueAfter).To(BeZero())
-			Expect(err).Should(HaveOccurred())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+
+			// k8s node has node IP, VM nic info has an empty IP
+			nic = fake.NewTowerVMNic(0)
+			nic.IPAddress = service.TowerString("")
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic}, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).To(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+
+			// k8s node has node IP, VM has one nic info
+			nic = fake.NewTowerVMNic(0)
+			nic.IPAddress = service.TowerString("127.0.0.1")
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic}, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).To(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+
+			// test elfMachine with two network devices.
+			elfMachine.Spec.Network.Devices = append(elfMachine.Spec.Network.Devices, infrav1.NetworkDeviceSpec{})
+			ctrlContext = newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md, kubeConfigSecret)
+			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
+			reconciler = &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
+
+			// k8s node has node IP, VM has no nic info
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return(nil, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).NotTo(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
 			Expect(logBuffer.String()).To(ContainSubstring("VM network is not ready yet"))
 			elfMachine = &infrav1.ElfMachine{}
 			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
-			expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.WaitingForNetworkAddressesReason}})
+			expectConditions(elfMachine, []conditionAssertion{{infrav1.VMProvisionedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForNetworkAddressesReason}})
+
+			// k8s node does not have a node IP, VM has two nics with IPs
+			k8sNode.Status.Addresses = nil
+			Expect(patchHelper.Patch(ctx, k8sNode)).To(Succeed())
+			nic1 := fake.NewTowerVMNic(0)
+			nic1.IPAddress = service.TowerString("127.0.0.1")
+			nic2 := fake.NewTowerVMNic(1)
+			nic2.IPAddress = service.TowerString("127.0.0.2")
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic1, nic2}, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).To(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
+
+			// test elfMachine with 3 network devices, one with NetworkType None
+			elfMachine.Spec.Network.Devices = append(elfMachine.Spec.Network.Devices, infrav1.NetworkDeviceSpec{NetworkType: infrav1.NetworkTypeNone})
+			ctrlContext = newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md, kubeConfigSecret)
+			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
+			reconciler = &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
+
+			// k8s node does not have a node IP, VM has two nics with IPs, one device's NetworkType is None
+			nic1 = fake.NewTowerVMNic(0)
+			nic1.IPAddress = service.TowerString("127.0.0.1")
+			nic2 = fake.NewTowerVMNic(1)
+			nic2.IPAddress = service.TowerString("127.0.0.2")
+			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic1, nic2}, nil)
+			result, err = reconciler.Reconcile(ctx, ctrl.Request{NamespacedName: elfMachineKey})
+			Expect(result.RequeueAfter).To(BeZero())
+			Expect(err).ShouldNot(HaveOccurred())
+			Expect(reconciler.Client.Get(reconciler, elfMachineKey, elfMachine)).To(Succeed())
 		})
 
 		It("should set ElfMachine to ready when VM network is ready", func() {
@@ -1472,10 +1619,12 @@ var _ = Describe("ElfMachineReconciler", func() {
 			vm.EntityAsyncStatus = nil
 			elfMachine.Status.VMRef = *vm.LocalID
 			nic := fake.NewTowerVMNic(0)
+			nic.IPAddress = service.TowerString("127.0.0.1")
 			placementGroup := fake.NewVMPlacementGroup([]string{*vm.ID})
-			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md)
+			ctrlContext := newCtrlContexts(elfCluster, cluster, elfMachine, machine, secret, md, kubeConfigSecret)
 			fake.InitOwnerReferences(ctrlContext, elfCluster, cluster, elfMachine, machine)
 
+			Expect(testEnv.CreateAndWait(ctx, k8sNode)).To(Succeed())
 			mockVMService.EXPECT().Get(elfMachine.Status.VMRef).Return(vm, nil)
 			mockVMService.EXPECT().GetVMNics(*vm.ID).Return([]*models.VMNic{nic}, nil)
 			mockVMService.EXPECT().GetVMPlacementGroup(gomock.Any()).Times(2).Return(placementGroup, nil)
@@ -1949,7 +2098,6 @@ var _ = Describe("ElfMachineReconciler", func() {
 
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
 			result, err := reconciler.reconcileDelete(machineContext)
-			fmt.Println(logBuffer.String())
 			Expect(result.RequeueAfter).NotTo(BeZero())
 			Expect(err).ToNot(HaveOccurred())
 
@@ -1999,7 +2147,6 @@ var _ = Describe("ElfMachineReconciler", func() {
 
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
 			result, err := reconciler.reconcileDelete(machineContext)
-			fmt.Println(logBuffer.String())
 			Expect(result.RequeueAfter).NotTo(BeZero())
 			Expect(err).ToNot(HaveOccurred())
 
@@ -2046,7 +2193,6 @@ var _ = Describe("ElfMachineReconciler", func() {
 
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
 			result, err := reconciler.reconcileDelete(machineContext)
-			fmt.Println(logBuffer.String())
 			Expect(result.RequeueAfter).NotTo(BeZero())
 			Expect(err).ToNot(HaveOccurred())
 
@@ -2086,7 +2232,6 @@ var _ = Describe("ElfMachineReconciler", func() {
 
 			reconciler := &ElfMachineReconciler{ControllerContext: ctrlContext, NewVMService: mockNewVMService}
 			_, err := reconciler.reconcileDelete(machineContext)
-			fmt.Println(logBuffer.String())
 			Expect(err).NotTo(BeZero())
 			Expect(err.Error()).To(ContainSubstring("failed to get client"))
 
diff --git a/test/helpers/cluster.go b/test/helpers/cluster.go
index 41d7a90d..d974acff 100644
--- a/test/helpers/cluster.go
+++ b/test/helpers/cluster.go
@@ -20,24 +20,12 @@ func CreateKubeConfigSecret(testEnv *TestEnvironment, namespace, clusterName str
 		return err
 	}
 
-	bs, err := os.ReadFile(testEnv.Kubeconfig)
+	secret, err := NewKubeConfigSecret(testEnv, namespace, clusterName)
 	if err != nil {
 		return err
 	}
 
-	return testEnv.CreateAndWait(goctx.Background(), &corev1.Secret{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      capisecret.Name(clusterName, capisecret.Kubeconfig),
-			Namespace: namespace,
-			Labels: map[string]string{
-				clusterv1.ClusterNameLabel: clusterName,
-			},
-		},
-		Data: map[string][]byte{
-			capisecret.KubeconfigDataName: bs,
-		},
-		Type: clusterv1.ClusterSecretType,
-	})
+	return testEnv.CreateAndWait(goctx.Background(), secret)
 }
 
 // GetKubeConfigSecret uses kubeconfig of testEnv to get the workload cluster kubeconfig secret.
@@ -73,3 +61,25 @@ func DeleteKubeConfigSecret(testEnv *TestEnvironment, namespace, clusterName str
 	}
 	return nil
 }
+
+// NewKubeConfigSecret uses the kubeconfig of testEnv to generate the workload cluster kubeconfig secret.
+func NewKubeConfigSecret(testEnv *TestEnvironment, namespace, clusterName string) (*corev1.Secret, error) {
+	bs, err := os.ReadFile(testEnv.Kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      capisecret.Name(clusterName, capisecret.Kubeconfig),
+			Namespace: namespace,
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: clusterName,
+			},
+		},
+		Data: map[string][]byte{
+			capisecret.KubeconfigDataName: bs,
+		},
+		Type: clusterv1.ClusterSecretType,
+	}, nil
+}
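
Reviewer note (not part of the change): below is a minimal, illustrative sketch of how the new ElfMachine.GetNetworkDevicesRequiringIP helper from this diff behaves, written as a standalone Go test. The package import path and the test file itself are assumptions for illustration only; the NetworkDeviceSpec and NetworkTypeNone types and the helper come from the diff above.

package v1beta1_test

import (
	"testing"

	// Assumed module path; adjust to the repository's actual module if it differs.
	infrav1 "github.com/smartxworks/cluster-api-provider-elf/api/v1beta1"
)

// TestGetNetworkDevicesRequiringIP checks that devices with NetworkType None are
// excluded, matching the filtering reconcileNetwork now uses before comparing the
// collected IPs against the devices that actually need one.
func TestGetNetworkDevicesRequiringIP(t *testing.T) {
	elfMachine := &infrav1.ElfMachine{}
	elfMachine.Spec.Network.Devices = []infrav1.NetworkDeviceSpec{
		{},                                     // default device, requires an IP
		{NetworkType: infrav1.NetworkTypeNone}, // NetworkType None, skipped
	}

	devices := elfMachine.GetNetworkDevicesRequiringIP()
	if len(devices) != 1 {
		t.Fatalf("expected 1 device requiring an IP, got %d", len(devices))
	}
}

With this helper in place, reconcileNetwork records every NIC in Status.Network (even with an empty IP) but only fills Status.Addresses, and reports the network as ready, once the number of distinct IPs collected from Tower NICs plus, as a fallback, the K8s Node's InternalIP reaches the number of devices returned by GetNetworkDevicesRequiringIP.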