Merge pull request #3790 from fabriziopandini/rework-skipped-test
🌱 Rework skipped test
k8s-ci-robot authored Oct 15, 2020
2 parents c60ae8b + eb011e6 commit 07f1481
Showing 7 changed files with 82 additions and 78 deletions.
@@ -126,10 +126,8 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t *testing.T) {
g.Expect(result.RequeueAfter).To(Equal(time.Duration(0)))
}

-// Reconcile returns an error in this case because the owning machine should not go away before the things it owns.
-func TestKubeadmConfigReconciler_Reconcile_ReturnErrorIfReferencedMachineIsNotFound(t *testing.T) {
-t.Skip("This test doens't look correct, the reconciler returns nil if the owner isn't found")
-
+// Reconcile returns nil if the referenced Machine cannot be found.
+func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFound(t *testing.T) {
g := NewWithT(t)

machine := newMachine(nil, "machine")
@@ -152,7 +150,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnErrorIfReferencedMachineIsNotFound(t *testing.T) {
},
}
_, err := k.Reconcile(ctx, request)
-g.Expect(err).To(HaveOccurred())
+g.Expect(err).To(BeNil())
}

// If the machine has bootstrap data secret reference, there is no need to generate more bootstrap data.
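The rewritten test encodes the reconciler's actual behaviour: a missing owner Machine is treated as a benign condition rather than an error. A minimal sketch of that pattern (not the real KubeadmConfig reconciler; the request-to-owner mapping is simplified, and a controller-runtime version whose Reconcile accepts a context is assumed):

package controllers

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

type sketchReconciler struct {
	client.Client
}

func (r *sketchReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Look up the owner Machine. If it is already gone there is nothing left to
	// reconcile, so return nil (no error, no requeue) instead of failing.
	owner := &clusterv1.Machine{}
	if err := r.Get(ctx, req.NamespacedName, owner); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// ...normal reconciliation of the owned config would continue here...
	return ctrl.Result{}, nil
}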
@@ -28,8 +28,6 @@ import (
)

func Test_inventoryClient_GetManagementGroups(t *testing.T) {
-t.Skip("Some of these tests now fail, because they rely on ordering to compare items, needs some rework")
-
type fields struct {
proxy Proxy
}
@@ -157,7 +155,11 @@ func Test_inventoryClient_GetManagementGroups(t *testing.T) {
return
}
g.Expect(err).NotTo(HaveOccurred())
-g.Expect(got).To(ConsistOf(tt.want))
+g.Expect(got).To(HaveLen(len(tt.want)))
+for i := range tt.want {
+g.Expect(got[i].CoreProvider).To(Equal(tt.want[i].CoreProvider))
+g.Expect(got[i].Providers).To(ConsistOf(tt.want[i].Providers))
+}
})
}
}
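The reworked assertion compares the management groups positionally and only the Providers inside each group order-insensitively, because the provider ordering within a group is not guaranteed. A self-contained sketch of the same Gomega pattern, using invented stand-in types rather than the real ManagementGroup:

package cluster

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Hypothetical stand-ins for the real ManagementGroup/Provider types.
type sketchProvider struct{ Name string }

type sketchGroup struct {
	CoreProvider sketchProvider
	Providers    []sketchProvider
}

func TestCompareGroupsIgnoringProviderOrder(t *testing.T) {
	g := NewWithT(t)

	got := []sketchGroup{
		{CoreProvider: sketchProvider{"core"}, Providers: []sketchProvider{{"infra"}, {"bootstrap"}}},
	}
	want := []sketchGroup{
		// Same content, but the inner Providers slice is ordered differently.
		{CoreProvider: sketchProvider{"core"}, Providers: []sketchProvider{{"bootstrap"}, {"infra"}}},
	}

	// Compare the outer list positionally and the inner slice order-insensitively.
	g.Expect(got).To(HaveLen(len(want)))
	for i := range want {
		g.Expect(got[i].CoreProvider).To(Equal(want[i].CoreProvider))
		g.Expect(got[i].Providers).To(ConsistOf(want[i].Providers))
	}
}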
6 changes: 0 additions & 6 deletions cmd/clusterctl/client/cluster/mover_test.go
@@ -440,8 +440,6 @@ var moveTests = []struct {
}

func Test_getMoveSequence(t *testing.T) {
-t.Skip("A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
for _, tt := range moveTests {
t.Run(tt.name, func(t *testing.T) {
@@ -474,8 +472,6 @@ func Test_getMoveSequence(t *testing.T) {
}

func Test_objectMover_move_dryRun(t *testing.T) {
-t.Skip("A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
for _, tt := range moveTests {
t.Run(tt.name, func(t *testing.T) {
@@ -550,8 +546,6 @@ func Test_objectMover_move_dryRun(t *testing.T) {
}

func Test_objectMover_move(t *testing.T) {
-t.Skip("A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
for _, tt := range moveTests {
t.Run(tt.name, func(t *testing.T) {
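The NB comments describe the structure these tests rely on: a single package-level table drives several test functions, each asserting a different stage of the move. A compressed, hypothetical sketch of that layout (names and fields invented for illustration):

package cluster

import "testing"

// One shared table, consumed by more than one test function.
var moveSketchTests = []struct {
	name string
	objs []string
}{
	{name: "single cluster", objs: []string{"cluster1"}},
	{name: "two clusters", objs: []string{"cluster1", "cluster2"}},
}

// First test: validate only the planned sequence.
func Test_sketchMoveSequence(t *testing.T) {
	for _, tt := range moveSketchTests {
		t.Run(tt.name, func(t *testing.T) {
			if len(tt.objs) == 0 {
				t.Fatalf("expected a non-empty move sequence for %q", tt.name)
			}
		})
	}
}

// Second test: reuse the same inputs, but assert on the final state instead.
func Test_sketchMove(t *testing.T) {
	for _, tt := range moveSketchTests {
		t.Run(tt.name, func(t *testing.T) {
			moved := append([]string(nil), tt.objs...) // stand-in for the real move
			if len(moved) != len(tt.objs) {
				t.Fatalf("moved %d objects, want %d", len(moved), len(tt.objs))
			}
		})
	}
}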
17 changes: 7 additions & 10 deletions cmd/clusterctl/client/cluster/objectgraph_test.go
@@ -101,13 +101,13 @@ type wantGraph struct {
func assertGraph(t *testing.T, got *objectGraph, want wantGraph) {
g := NewWithT(t)

-g.Expect(len(got.uidToNode)).To(Equal(len(want.nodes)))
+g.Expect(len(got.uidToNode)).To(Equal(len(want.nodes)), "the number of nodes in the objectGraph doesn't match the number of expected nodes")

for uid, wantNode := range want.nodes {
gotNode, ok := got.uidToNode[types.UID(uid)]
-g.Expect(ok).To(BeTrue(), "node ", uid, " not found")
-g.Expect(gotNode.virtual).To(Equal(wantNode.virtual))
-g.Expect(gotNode.owners).To(HaveLen(len(wantNode.owners)))
+g.Expect(ok).To(BeTrue(), "node %q not found", uid)
+g.Expect(gotNode.virtual).To(Equal(wantNode.virtual), "node %q.virtual does not have the expected value", uid)
+g.Expect(gotNode.owners).To(HaveLen(len(wantNode.owners)), "node %q.owner does not have the expected length", uid)

for _, wantOwner := range wantNode.owners {
found := false
@@ -117,10 +117,10 @@ func assertGraph(t *testing.T, got *objectGraph, want wantGraph) {
break
}
}
-g.Expect(found).To(BeTrue())
+g.Expect(found).To(BeTrue(), "node %q.owners does not contain %q", uid, wantOwner)
}

-g.Expect(gotNode.softOwners).To(HaveLen(len(wantNode.softOwners)))
+g.Expect(gotNode.softOwners).To(HaveLen(len(wantNode.softOwners)), "node %q.softOwners does not have the expected length", uid)

for _, wantOwner := range wantNode.softOwners {
found := false
@@ -130,7 +130,7 @@ func assertGraph(t *testing.T, got *objectGraph, want wantGraph) {
break
}
}
-g.Expect(found).To(BeTrue())
+g.Expect(found).To(BeTrue(), "node %q.softOwners does not contain %q", uid, wantOwner)
}
}
}
@@ -1017,7 +1017,6 @@ var objectGraphsTests = []struct {
},
},
},

{
name: "Cluster and Global + Namespaced External Objects",
args: objectGraphTestArgs{
@@ -1116,8 +1115,6 @@ func getFakeDiscoveryTypes(graph *objectGraph) error {
}

func TestObjectGraph_Discovery(t *testing.T) {
-t.Skip("TestObjectGraph_Discovery/A_ClusterResourceSet_applied_to_a_cluster is now failing, needs to be investigated")
-
// NB. we are testing the graph is properly built starting from objects (TestGraphBuilder_addObj_WithFakeObjects) or from the same objects read from the cluster (this test).
for _, tt := range objectGraphsTests {
t.Run(tt.name, func(t *testing.T) {
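The assertGraph changes attach a description to every assertion. Gomega matchers accept optional description arguments after the matcher, and when the first one is a format string the remaining arguments are interpolated, so a failure points at the exact node that broke. A minimal sketch:

package cluster

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestDescribedAssertions(t *testing.T) {
	g := NewWithT(t)

	uidToNode := map[string]bool{"ns1/cluster1": true}

	for _, uid := range []string{"ns1/cluster1"} {
		virtual, ok := uidToNode[uid]
		// The arguments after the matcher become part of the failure message.
		g.Expect(ok).To(BeTrue(), "node %q not found", uid)
		g.Expect(virtual).To(Equal(true), "node %q.virtual does not have the expected value", uid)
	}
}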
18 changes: 6 additions & 12 deletions cmd/clusterctl/internal/test/fake_objects.go
@@ -1152,21 +1152,15 @@ func SelectClusterObj(objs []client.Object, namespace, name string) *clusterv1.Cluster {
continue
}

-accessor, err := meta.Accessor(o)
-if err != nil {
-panic(fmt.Sprintf("failed to get accessor for %s: %v", o.GetObjectKind(), err))
-}
-
-if accessor.GetName() == name && accessor.GetNamespace() == namespace {
-cluster := &clusterv1.Cluster{
-TypeMeta: metav1.TypeMeta{
-APIVersion: clusterv1.GroupVersion.String(),
-Kind: "Cluster",
-},
-}
+if o.GetName() == name && o.GetNamespace() == namespace {
+// Converts the object to cluster
+// NB. Convert returns an object without version/kind, so we are enforcing those values back.
+cluster := &clusterv1.Cluster{}
if err := FakeScheme.Convert(o, cluster, nil); err != nil {
panic(fmt.Sprintf("failed to convert %s to cluster: %v", o.GetObjectKind(), err))
}
+cluster.APIVersion = o.GetObjectKind().GroupVersionKind().GroupVersion().String()
+cluster.Kind = o.GetObjectKind().GroupVersionKind().Kind
return cluster
}
}
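The new SelectClusterObj body converts the generic object through the scheme and then re-applies apiVersion/kind, since scheme conversions are not guaranteed to carry TypeMeta over. A small sketch of the same defensive pattern, assuming a recent client-go/controller-runtime and using a ConfigMap in place of the real Cluster type:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// toConfigMap mirrors the SelectClusterObj pattern: convert a generic
// client.Object into a concrete typed struct, then enforce apiVersion/kind,
// because the conversion may not populate TypeMeta on the target.
func toConfigMap(o client.Object) *corev1.ConfigMap {
	cm := &corev1.ConfigMap{}
	if err := scheme.Scheme.Convert(o, cm, nil); err != nil {
		panic(fmt.Sprintf("failed to convert %s to ConfigMap: %v", o.GetObjectKind(), err))
	}
	cm.APIVersion = o.GetObjectKind().GroupVersionKind().GroupVersion().String()
	cm.Kind = o.GetObjectKind().GroupVersionKind().Kind
	return cm
}

func main() {
	src := &corev1.ConfigMap{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
		Data:       map[string]string{"Corefile": "updated-core-file"},
	}
	cm := toConfigMap(src)
	fmt.Println(cm.APIVersion, cm.Kind, cm.Name)
}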
10 changes: 4 additions & 6 deletions controllers/cluster_controller_test.go
@@ -194,9 +194,7 @@ var _ = Describe("Cluster Reconciler", func() {
}, timeout).Should(BeTrue())
})

-It("Should successfully patch a cluster object if only removing finalizers", func() {
-Skip("This test doesn't look correct, if we remove the finalizer the reconciler takes care of re-adding it")
-
+It("Should re-apply finalizers if removed", func() {
// Setup
cluster := &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
@@ -219,7 +217,7 @@ var _ = Describe("Cluster Reconciler", func() {
return len(cluster.Finalizers) > 0
}, timeout).Should(BeTrue())

-// Patch
+// Remove finalizers
Eventually(func() bool {
ph, err := patch.NewHelper(cluster, testEnv)
Expect(err).ShouldNot(HaveOccurred())
@@ -230,14 +228,14 @@ var _ = Describe("Cluster Reconciler", func() {

Expect(cluster.Finalizers).Should(BeEmpty())

-// Assertions
+// Check finalizers are re-applied
Eventually(func() []string {
instance := &clusterv1.Cluster{}
if err := testEnv.Get(ctx, key, instance); err != nil {
return []string{"not-empty"}
}
return instance.Finalizers
-}, timeout).Should(BeEmpty())
+}, timeout).ShouldNot(BeEmpty())
})

It("Should successfully set Status.ControlPlaneInitialized on the cluster object if controlplane is ready", func() {
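The reworked spec keeps the same mechanics (snapshot the object with a patch helper, clear the finalizers, patch) but now expects the reconciler to re-add the finalizer. A minimal sketch of the patch-helper flow, assuming a recent sigs.k8s.io/cluster-api/util/patch whose NewHelper accepts a client.Object:

package controllers

import (
	"context"

	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// removeFinalizers snapshots the object, mutates it in memory, and sends only
// the resulting diff; the controller under test is then expected to re-add its
// finalizer, which the Eventually(...).ShouldNot(BeEmpty()) assertion waits for.
func removeFinalizers(ctx context.Context, c client.Client, obj client.Object) error {
	ph, err := patch.NewHelper(obj, c)
	if err != nil {
		return err
	}
	obj.SetFinalizers(nil)
	return ph.Patch(ctx, obj)
}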
93 changes: 57 additions & 36 deletions controlplane/kubeadm/internal/workload_cluster_coredns_test.go
@@ -20,6 +20,7 @@ import (
"testing"

. "github.com/onsi/gomega"
+apierrors "k8s.io/apimachinery/pkg/api/errors"

"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
@@ -36,8 +37,6 @@ import (
)

func TestUpdateCoreDNS(t *testing.T) {
-t.Skip("This now fails because it's using Update instead of patch, needs rework")
-
validKCP := &controlplanev1.KubeadmControlPlane{
Spec: controlplanev1.KubeadmControlPlaneSpec{
KubeadmConfigSpec: cabpkv1.KubeadmConfigSpec{
@@ -284,44 +283,66 @@ kind: ClusterConfiguration
},
}

+// We are using testEnv as a workload cluster, and given that each test case assumes well known objects with specific
+// Namespace/Name (e.g. The CoderDNS ConfigMap & Deployment, the kubeadm ConfigMap), it is not possible to run the use cases in parallel.
for _, tt := range tests {
-t.Run(tt.name, func(t *testing.T) {
-g := NewWithT(t)
-fakeClient := fake.NewFakeClientWithScheme(scheme.Scheme, tt.objs...)
-w := &Workload{
-Client: fakeClient,
-CoreDNSMigrator: tt.migrator,
-}
-err := w.UpdateCoreDNS(ctx, tt.kcp)
-if tt.expectErr {
-g.Expect(err).To(HaveOccurred())
-return
-}
-g.Expect(err).ToNot(HaveOccurred())
+g := NewWithT(t)
+t.Log(tt.name)
+
+for _, o := range tt.objs {
+// NB. deep copy test object so changes applied during a test does not affect other tests.
+o := o.DeepCopyObject().(client.Object)
+g.Expect(testEnv.Create(ctx, o)).To(Succeed())
+}
+
-// Assert that CoreDNS updates have been made
-if tt.expectUpdates {
-// assert kubeadmConfigMap
-var expectedKubeadmConfigMap corev1.ConfigMap
-g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed())
-g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("1.7.2")))
-g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("k8s.gcr.io/some-repo")))
-
-// assert CoreDNS corefile
-var expectedConfigMap corev1.ConfigMap
-g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed())
-g.Expect(expectedConfigMap.Data).To(HaveLen(2))
-g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file"))
-g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", expectedCorefile))
-
-// assert CoreDNS deployment
-var actualDeployment appsv1.Deployment
-g.Expect(fakeClient.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed())
-// ensure the image is updated and the volumes point to the corefile
-g.Expect(actualDeployment.Spec.Template.Spec.Containers[0].Image).To(Equal("k8s.gcr.io/some-repo/coredns:1.7.2"))
+w := &Workload{
+Client: testEnv.GetClient(),
+CoreDNSMigrator: tt.migrator,
+}
+err := w.UpdateCoreDNS(ctx, tt.kcp)
+if tt.expectErr {
+g.Expect(err).To(HaveOccurred())
+return
+}
+g.Expect(err).ToNot(HaveOccurred())
+
+// Assert that CoreDNS updates have been made
+if tt.expectUpdates {
+// assert kubeadmConfigMap
+var expectedKubeadmConfigMap corev1.ConfigMap
+g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem}, &expectedKubeadmConfigMap)).To(Succeed())
+g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("1.7.2")))
+g.Expect(expectedKubeadmConfigMap.Data).To(HaveKeyWithValue("ClusterConfiguration", ContainSubstring("k8s.gcr.io/some-repo")))
+
+// assert CoreDNS corefile
+var expectedConfigMap corev1.ConfigMap
+g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &expectedConfigMap)).To(Succeed())
+g.Expect(expectedConfigMap.Data).To(HaveLen(2))
+g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile", "updated-core-file"))
+g.Expect(expectedConfigMap.Data).To(HaveKeyWithValue("Corefile-backup", expectedCorefile))
+
+// assert CoreDNS deployment
+var actualDeployment appsv1.Deployment
+g.Eventually(func() string {
+g.Expect(testEnv.Get(ctx, ctrlclient.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed())
+return actualDeployment.Spec.Template.Spec.Containers[0].Image
+}, "5s").Should(Equal("k8s.gcr.io/some-repo/coredns:1.7.2"))
+}
+
+// Cleanup test objects (and wait for deletion to complete).
+testEnv.Cleanup(ctx, tt.objs...)
+g.Eventually(func() bool {
+for _, o := range []client.Object{cm, depl, kubeadmCM} {
+// NB. deep copy test object so changes applied during a test does not affect other tests.
+o := o.DeepCopyObject().(client.Object)
+key, _ := client.ObjectKeyFromObject(o)
+err := testEnv.Get(ctx, key, o)
+if err == nil || (err != nil && !apierrors.IsNotFound(err)) {
+return false
+}
+}
-})
+return true
+}, "10s").Should(BeTrue())
}
}
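Switching from the fake client to the shared testEnv has two visible consequences above: reads go through a cache that is only eventually consistent, so the Deployment image is asserted with Eventually, and each case must wait for its objects to be fully deleted before the next one starts. A condensed sketch of those two helpers (function names invented; assumes a recent controller-runtime where client.ObjectKeyFromObject returns just the key):

package internal

import (
	"context"

	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForDeploymentImage polls until the cached Deployment reports the expected
// image; a plain Get-and-assert can race the controller's update when running
// against a real API server instead of a fake client.
func waitForDeploymentImage(ctx context.Context, g *WithT, c client.Client, key client.ObjectKey, wantImage string) {
	g.Eventually(func() string {
		d := &appsv1.Deployment{}
		if err := c.Get(ctx, key, d); err != nil || len(d.Spec.Template.Spec.Containers) == 0 {
			return ""
		}
		return d.Spec.Template.Spec.Containers[0].Image
	}, "5s").Should(Equal(wantImage), "Deployment %s never reported image %s", key, wantImage)
}

// waitForDeletion blocks until every object is fully gone, so the next test
// case starts from a clean slate.
func waitForDeletion(ctx context.Context, g *WithT, c client.Client, objs []client.Object) {
	g.Eventually(func() bool {
		for _, o := range objs {
			o := o.DeepCopyObject().(client.Object)
			if err := c.Get(ctx, client.ObjectKeyFromObject(o), o); !apierrors.IsNotFound(err) {
				return false
			}
		}
		return true
	}, "10s").Should(BeTrue(), "some test objects were not cleaned up")
}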
