Three data hall fault domain storage check bug fix (#1869)
* Change minimum fault domains from 4 to 3 for three_data_hall redundancy
simenl authored Nov 6, 2023
1 parent f4780b5 commit fbb4c9f
Showing 2 changed files with 58 additions and 4 deletions.
4 changes: 1 addition & 3 deletions api/v1beta2/foundationdb_database_configuration.go
@@ -718,10 +718,8 @@ func MinimumFaultDomains(redundancyMode RedundancyMode) int {
 		return 1
 	case RedundancyModeDouble, RedundancyModeUnset:
 		return 2
-	case RedundancyModeTriple:
+	case RedundancyModeTriple, RedundancyModeThreeDataHall:
 		return 3
-	case RedundancyModeThreeDataHall:
-		return 4
 	default:
 		return 1
 	}
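Why 3 is the right floor: in three_data_hall mode FoundationDB stores data in triplicate, with one copy in each of three data halls, so three fault domains match the replication factor; the old value of 4 made MinimumFaultDomains stricter than the redundancy mode itself requires. For reference, a sketch of the function as it reads after this commit, reconstructed from the hunk (the switch header and the first case label are outside the diff context and are assumed, likely RedundancyModeSingle):

func MinimumFaultDomains(redundancyMode RedundancyMode) int {
	switch redundancyMode {
	case RedundancyModeSingle: // assumed: only its "return 1" body is visible in the hunk
		return 1
	case RedundancyModeDouble, RedundancyModeUnset:
		return 2
	case RedundancyModeTriple, RedundancyModeThreeDataHall:
		return 3
	default:
		return 1
	}
}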
58 changes: 57 additions & 1 deletion controllers/remove_process_groups_test.go
@@ -117,7 +117,7 @@ var _ = Describe("remove_process_groups", func() {
 		})
 	})
 
-	When("the cluster has degraded stroage fault tolerance", func() {
+	When("the cluster has degraded storage fault tolerance", func() {
 		BeforeEach(func() {
 			adminClient, err := mock.NewMockAdminClientUncast(cluster, k8sClient)
 			Expect(err).NotTo(HaveOccurred())
@@ -192,6 +192,62 @@ var _ = Describe("remove_process_groups", func() {
 		})
 	})
 
+	When("the cluster has three_data_hall redundancy", func() {
+		BeforeEach(func() {
+			adminClient, err := mock.NewMockAdminClientUncast(cluster, k8sClient)
+			Expect(err).NotTo(HaveOccurred())
+			adminClient.DatabaseConfiguration.RedundancyMode = fdbv1beta2.RedundancyModeThreeDataHall
+
+		})
+		When("storage have 3 replicas", func() {
+			BeforeEach(func() {
+				adminClient, err := mock.NewMockAdminClientUncast(cluster, k8sClient)
+				Expect(err).NotTo(HaveOccurred())
+				adminClient.TeamTracker = []fdbv1beta2.FoundationDBStatusTeamTracker{
+					{
+						Primary: true,
+						State: fdbv1beta2.FoundationDBStatusDataState{
+							Healthy: true,
+							MinReplicasRemaining: 3,
+						},
+					},
+				}
+			})
+			It("should successfully remove that process group", func() {
+				Expect(result).To(BeNil())
+				// Ensure resources are deleted
+				removed, include, err := confirmRemoval(context.Background(), globalControllerLogger, clusterReconciler, cluster, removedProcessGroup)
+				Expect(err).To(BeNil())
+				Expect(removed).To(BeTrue())
+				Expect(include).To(BeTrue())
+			})
+		})
+		When("storage have 2 replicas", func() {
+			BeforeEach(func() {
+				adminClient, err := mock.NewMockAdminClientUncast(cluster, k8sClient)
+				Expect(err).NotTo(HaveOccurred())
+				adminClient.TeamTracker = []fdbv1beta2.FoundationDBStatusTeamTracker{
+					{
+						Primary: true,
+						State: fdbv1beta2.FoundationDBStatusDataState{
+							Healthy: true,
+							MinReplicasRemaining: 2,
+						},
+					},
+				}
+			})
+			It("should not remove the process group and should not exclude processes", func() {
+				Expect(result).NotTo(BeNil())
+				Expect(result.message).To(Equal("Removals cannot proceed because cluster has degraded fault tolerance"))
+				// Ensure resources are not deleted
+				removed, include, err := confirmRemoval(context.Background(), globalControllerLogger, clusterReconciler, cluster, removedProcessGroup)
+				Expect(err).To(BeNil())
+				Expect(removed).To(BeFalse())
+				Expect(include).To(BeFalse())
+			})
+		})
+	})
+
 	When("removing multiple process groups", func() {
 		var initialCnt int
 		var secondRemovedProcessGroup *fdbv1beta2.ProcessGroupStatus
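The two new test cases pin down the behavior at the boundary: with three_data_hall redundancy and MinReplicasRemaining of 3 the removal proceeds, while at 2 it is blocked with the degraded-fault-tolerance message. A self-contained, hypothetical sketch of that gate, assuming the check simply compares the remaining replica count against the redundancy mode's replication factor (the names replicationFactor and canRemove are illustrative, not the operator's identifiers):

package main

import "fmt"

// replicationFactor mirrors the replica counts implied by the tests;
// three_data_hall keeps one copy in each of three data halls.
func replicationFactor(redundancyMode string) int {
	switch redundancyMode {
	case "single":
		return 1
	case "double":
		return 2
	case "triple", "three_data_hall":
		return 3
	default:
		return 1
	}
}

// canRemove is a hypothetical gate: removals may proceed only while the
// cluster still holds its full replica count for the configured mode.
func canRemove(redundancyMode string, minReplicasRemaining int) bool {
	return minReplicasRemaining >= replicationFactor(redundancyMode)
}

func main() {
	fmt.Println(canRemove("three_data_hall", 3)) // true: removal may proceed
	fmt.Println(canRemove("three_data_hall", 2)) // false: fault tolerance already degraded
}

Under the pre-fix minimum of 4 fault domains, the healthy three-replica case would also have been treated as degraded, which is the bug this commit removes.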
