diff --git a/.evergreen-tasks.yml b/.evergreen-tasks.yml index 069a695e9..b5366b2a0 100644 --- a/.evergreen-tasks.yml +++ b/.evergreen-tasks.yml @@ -254,6 +254,11 @@ tasks: commands: - func: "e2e_test" + - name: e2e_mongodb_custom_roles + tags: [ "patch-run" ] + commands: + - func: "e2e_test" + - name: e2e_replica_set_recovery tags: [ "patch-run" ] commands: @@ -644,7 +649,7 @@ tasks: commands: - func: "e2e_test" - - name: e2e_replica_set_custom_roles + - name: e2e_replica_set_ldap_custom_roles tags: [ "patch-run" ] commands: - func: "e2e_test" diff --git a/.evergreen.yml b/.evergreen.yml index 03fc5be8f..a41a356cd 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -728,7 +728,7 @@ task_groups: - e2e_replica_set_ldap_user_to_dn_mapping # e2e_replica_set_ldap_agent_auth - e2e_replica_set_ldap_agent_client_certs - - e2e_replica_set_custom_roles + - e2e_replica_set_ldap_custom_roles - e2e_replica_set_update_roles_no_privileges - e2e_replica_set_ldap_group_dn - e2e_replica_set_ldap_group_dn_with_x509_agent @@ -911,6 +911,7 @@ task_groups: - e2e_tls_x509_configure_all_options_sc - e2e_tls_x509_sc - e2e_meko_mck_upgrade + - e2e_mongodb_custom_roles - e2e_sharded_cluster_oidc_m2m_group - e2e_sharded_cluster_oidc_m2m_user - e2e_multi_cluster_oidc_m2m_group diff --git a/PROJECT b/PROJECT index 70c6f9556..3d36c752b 100644 --- a/PROJECT +++ b/PROJECT @@ -1,5 +1,13 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html domain: mongodb.com -layout: go.kubebuilder.io/v3 +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} projectName: mongodb-kubernetes repo: github.com/mongodb/mongodb-kubernetes resources: @@ -30,7 +38,13 @@ resources: kind: MongoDBUser path: github.com/mongodb/mongodb-kubernetes/api/v1 version: v1 +- api: + crdVersion: v1 + namespaced: false + controller: false + domain: mongodb.com + group: mongodb + kind: ClusterMongoDBRole + path: github.com/mongodb/mongodb-kubernetes/api/v1 + version: v1 version: "3" -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 8acaff1d8..011835f48 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,6 +1,24 @@ [//]: # (Consider renaming or removing the header for next release, otherwise it appears as duplicate in the published release, e.g: https://github.com/mongodb/mongodb-enterprise-kubernetes/releases/tag/1.22.0 ) +# MCK 1.2.0 Release Notes + +## New Features + +* Added new **ClusterMongoDBRole** CRD to support reusable roles across multiple MongoDB clusters. + * This allows users to define roles once and reuse them in multiple **MongoDB** or **MongoDBMultiCluster** resources. The role can be referenced through the `.spec.security.roleRefs` field. Note that only one of `.spec.security.roles` and `.spec.security.roleRefs` can be used at a time. + * **ClusterMongoDBRole** resources are treated by the operator as a custom role templates that are only used when referenced by the database resources. + * The new resource is watched by default by the operator. This means that the operator will require a new **ClusterRole** and **ClusterRoleBinding** to be created in the cluster. **ClusterRole** and **ClusterRoleBinding** resources are created by default with the helm chart or the kubectl mongodb plugin. 
+ * To disable this behavior in the helm chart, set the `operator.enableClusterMongoDBRoles` value to `false`. This will disable the creation of the necessary RBAC resources for the **ClusterMongoDBRole** resource, as well as disable the watch for this resource. + * To not install the necessary **ClusterRole** and **ClusterRoleBinding** with the kubectl mongodb plugin set the `--create-mongodb-roles-cluster-role` to false. + * The new **ClusterMongoDBRole** resource is designed to be read-only, meaning it can be used by MongoDB deployments managed by different operators. + * The **ClusterMongoDBRole** resource can be deleted at any time, but the operator will not delete any roles that were created using this resource. To properly remove access, you must **manually** remove the reference to the **ClusterMongoDBRole** in the **MongoDB** or **MongoDBMultiCluster** resources. + * The reference documentation for this resource can be found here: **TODO** (link to documentation) + * For more information please see: **TODO** (link to documentation) + + + + # MCK 1.1.0 Release Notes ## New Features @@ -12,7 +30,6 @@ * minimum MongoDB Community version: 8.0. * TLS must be disabled in MongoDB (communication between mongot and mongod is in plaintext for now). - # MCK 1.0.1 Release Notes diff --git a/api/v1/mdb/mongodb_roles_validation.go b/api/v1/mdb/mongodb_roles_validation.go index 174bb01d2..749ce8c04 100644 --- a/api/v1/mdb/mongodb_roles_validation.go +++ b/api/v1/mdb/mongodb_roles_validation.go @@ -270,7 +270,7 @@ func isValidCIDR(cidr string) bool { return err == nil } -func roleIsCorrectlyConfigured(role MongoDbRole, mdbVersion string) v1.ValidationResult { +func RoleIsCorrectlyConfigured(role MongoDBRole, mdbVersion string) v1.ValidationResult { // Extensive validation of the roles attribute if role.Role == "" { @@ -305,10 +305,10 @@ func roleIsCorrectlyConfigured(role MongoDbRole, mdbVersion string) v1.Validatio return v1.ValidationSuccess() } -func rolesAttributeisCorrectlyConfigured(d DbCommonSpec) v1.ValidationResult { +func rolesAttributeIsCorrectlyConfigured(d DbCommonSpec) v1.ValidationResult { // Validate every single entry and return error on the first one that fails validation for _, role := range d.Security.Roles { - if res := roleIsCorrectlyConfigured(role, d.Version); res.Level == v1.ErrorLevel { + if res := RoleIsCorrectlyConfigured(role, d.Version); res.Level == v1.ErrorLevel { return v1.ValidationError("Error validating role - %s", res.Msg) } } diff --git a/api/v1/mdb/mongodb_types.go b/api/v1/mdb/mongodb_types.go index 2add98c82..0f71068b9 100644 --- a/api/v1/mdb/mongodb_types.go +++ b/api/v1/mdb/mongodb_types.go @@ -742,10 +742,16 @@ type SharedConnectionSpec struct { CloudManagerConfig *PrivateCloudConfig `json:"cloudManager,omitempty"` } +// +kubebuilder:validation:XValidation:rule="!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() > 0 && self.roleRefs.size() > 0)",message="At most one of roles or roleRefs can be non-empty" type Security struct { TLSConfig *TLSConfig `json:"tls,omitempty"` Authentication *Authentication `json:"authentication,omitempty"` - Roles []MongoDbRole `json:"roles,omitempty"` + + // +optional + Roles []MongoDBRole `json:"roles,omitempty"` + + // +optional + RoleRefs []MongoDBRoleRef `json:"roleRefs,omitempty"` // +optional CertificatesSecretsPrefix string `json:"certsSecretPrefix"` @@ -973,7 +979,16 @@ type InheritedRole struct { Role string `json:"role"` } -type MongoDbRole struct { +type MongoDBRoleRef struct { + // 
+kubebuilder:validation:Required + Name string `json:"name"` + + // +kubebuilder:validation:Enum=ClusterMongoDBRole + // +kubebuilder:validation:Required + Kind string `json:"kind"` +} + +type MongoDBRole struct { Role string `json:"role"` AuthenticationRestrictions []AuthenticationRestriction `json:"authenticationRestrictions,omitempty"` Db string `json:"db"` @@ -1604,7 +1619,10 @@ func EnsureSecurity(sec *Security) *Security { sec.TLSConfig = &TLSConfig{} } if sec.Roles == nil { - sec.Roles = make([]MongoDbRole, 0) + sec.Roles = make([]MongoDBRole, 0) + } + if sec.RoleRefs == nil { + sec.RoleRefs = make([]MongoDBRoleRef, 0) } return sec } diff --git a/api/v1/mdb/mongodb_validation.go b/api/v1/mdb/mongodb_validation.go index 69d90e8d0..d33d3167c 100644 --- a/api/v1/mdb/mongodb_validation.go +++ b/api/v1/mdb/mongodb_validation.go @@ -394,7 +394,7 @@ func CommonValidators(db DbCommonSpec) []func(d DbCommonSpec) v1.ValidationResul deploymentsMustHaveAgentModeInAuthModes, scramSha1AuthValidation, ldapAuthRequiresEnterprise, - rolesAttributeisCorrectlyConfigured, + rolesAttributeIsCorrectlyConfigured, agentModeIsSetIfMoreThanADeploymentAuthModeIsSet, ldapGroupDnIsSetIfLdapAuthzIsEnabledAndAgentsAreExternal, specWithExactlyOneSchema, diff --git a/api/v1/mdb/mongodbbuilder.go b/api/v1/mdb/mongodbbuilder.go index 1740dca05..737be1ba5 100644 --- a/api/v1/mdb/mongodbbuilder.go +++ b/api/v1/mdb/mongodbbuilder.go @@ -146,6 +146,22 @@ func (b *MongoDBBuilder) SetSecurityTLSEnabled() *MongoDBBuilder { return b } +func (b *MongoDBBuilder) SetRoles(roles []MongoDBRole) *MongoDBBuilder { + if b.mdb.Spec.Security == nil { + b.mdb.Spec.Security = &Security{} + } + b.mdb.Spec.Security.Roles = roles + return b +} + +func (b *MongoDBBuilder) SetRoleRefs(roleRefs []MongoDBRoleRef) *MongoDBBuilder { + if b.mdb.Spec.Security == nil { + b.mdb.Spec.Security = &Security{} + } + b.mdb.Spec.Security.RoleRefs = roleRefs + return b +} + func (b *MongoDBBuilder) SetLabels(labels map[string]string) *MongoDBBuilder { b.mdb.Labels = labels return b diff --git a/api/v1/mdb/zz_generated.deepcopy.go b/api/v1/mdb/zz_generated.deepcopy.go index ac09a290b..f1e25ee2c 100644 --- a/api/v1/mdb/zz_generated.deepcopy.go +++ b/api/v1/mdb/zz_generated.deepcopy.go @@ -774,29 +774,7 @@ func (in *MongoDBList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MongoDbPodSpec) DeepCopyInto(out *MongoDbPodSpec) { - *out = *in - out.ContainerResourceRequirements = in.ContainerResourceRequirements - in.PodTemplateWrapper.DeepCopyInto(&out.PodTemplateWrapper) - if in.Persistence != nil { - in, out := &in.Persistence, &out.Persistence - *out = new(common.Persistence) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDbPodSpec. -func (in *MongoDbPodSpec) DeepCopy() *MongoDbPodSpec { - if in == nil { - return nil - } - out := new(MongoDbPodSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MongoDbRole) DeepCopyInto(out *MongoDbRole) { +func (in *MongoDBRole) DeepCopyInto(out *MongoDBRole) { *out = *in if in.AuthenticationRestrictions != nil { in, out := &in.AuthenticationRestrictions, &out.AuthenticationRestrictions @@ -819,12 +797,49 @@ func (in *MongoDbRole) DeepCopyInto(out *MongoDbRole) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDbRole. -func (in *MongoDbRole) DeepCopy() *MongoDbRole { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBRole. +func (in *MongoDBRole) DeepCopy() *MongoDBRole { if in == nil { return nil } - out := new(MongoDbRole) + out := new(MongoDBRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDBRoleRef) DeepCopyInto(out *MongoDBRoleRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDBRoleRef. +func (in *MongoDBRoleRef) DeepCopy() *MongoDBRoleRef { + if in == nil { + return nil + } + out := new(MongoDBRoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MongoDbPodSpec) DeepCopyInto(out *MongoDbPodSpec) { + *out = *in + out.ContainerResourceRequirements = in.ContainerResourceRequirements + in.PodTemplateWrapper.DeepCopyInto(&out.PodTemplateWrapper) + if in.Persistence != nil { + in, out := &in.Persistence, &out.Persistence + *out = new(common.Persistence) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongoDbPodSpec. +func (in *MongoDbPodSpec) DeepCopy() *MongoDbPodSpec { + if in == nil { + return nil + } + out := new(MongoDbPodSpec) in.DeepCopyInto(out) return out } @@ -1163,11 +1178,16 @@ func (in *Security) DeepCopyInto(out *Security) { } if in.Roles != nil { in, out := &in.Roles, &out.Roles - *out = make([]MongoDbRole, len(*in)) + *out = make([]MongoDBRole, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.RoleRefs != nil { + in, out := &in.RoleRefs, &out.RoleRefs + *out = make([]MongoDBRoleRef, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Security. 
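The api/v1/mdb changes above introduce `RoleRefs` alongside `Roles` on the `Security` struct, guarded by a CEL rule that only one of the two may be populated. As a minimal usage sketch (not part of this patch; the referenced role name matches the sample manifest added under config/samples later in the diff), the relevant `spec.security` fragment of a MongoDB or MongoDBMultiCluster resource would look like:

    security:
      roleRefs:
        - name: clustermongodbrole-sample
          kind: ClusterMongoDBRole

When `roleRefs` is used, `spec.security.roles` must stay empty; the operator resolves each referenced ClusterMongoDBRole and writes the resulting roles into the Ops Manager automation config (see the ensureRoles/getRoleRefs changes further down in this diff).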
diff --git a/api/v1/mdbmulti/mongodbmultibuilder.go b/api/v1/mdbmulti/mongodbmultibuilder.go index 17c8596d0..619598712 100644 --- a/api/v1/mdbmulti/mongodbmultibuilder.go +++ b/api/v1/mdbmulti/mongodbmultibuilder.go @@ -46,7 +46,7 @@ func DefaultMultiReplicaSetBuilder() *MultiReplicaSetBuilder { Authentication: &mdbv1.Authentication{ Modes: []mdbv1.AuthMode{}, }, - Roles: []mdbv1.MongoDbRole{}, + Roles: []mdbv1.MongoDBRole{}, }, DuplicateServiceObjects: util.BooleanRef(false), }, @@ -73,6 +73,14 @@ func (m *MultiReplicaSetBuilder) SetSecurity(s *mdbv1.Security) *MultiReplicaSet return m } +func (m *MultiReplicaSetBuilder) SetRoleRefs(roleRefs []mdbv1.MongoDBRoleRef) *MultiReplicaSetBuilder { + if m.Spec.Security == nil { + m.Spec.Security = &mdbv1.Security{} + } + m.Spec.Security.RoleRefs = roleRefs + return m +} + func (m *MultiReplicaSetBuilder) SetClusterSpecList(clusters []string) *MultiReplicaSetBuilder { randFive, err := rand.Int(rand.Reader, big.NewInt(5)) if err != nil { diff --git a/api/v1/role/clustermongodbrole_types.go b/api/v1/role/clustermongodbrole_types.go new file mode 100644 index 000000000..d8f9ef206 --- /dev/null +++ b/api/v1/role/clustermongodbrole_types.go @@ -0,0 +1,40 @@ +package role + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-kubernetes/api/v1" + mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" +) + +// ClusterMongoDBRoleSpec defines the desired state of ClusterMongoDBRole. +type ClusterMongoDBRoleSpec struct { + // +kubebuilder:pruning:PreserveUnknownFields + mdbv1.MongoDBRole `json:",inline"` +} + +// +kubebuilder:object:root=true +// +k8s:openapi-gen=true +// +kubebuilder:resource:scope=Cluster,shortName=cmdbr +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The time since the MongoDB Custom Role resource was created." + +// ClusterMongoDBRole is the Schema for the clustermongodbroles API. +type ClusterMongoDBRole struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterMongoDBRoleSpec `json:"spec,omitempty"` +} + +// +kubebuilder:object:root=true + +// ClusterMongoDBRoleList contains a list of ClusterMongoDBRole. +type ClusterMongoDBRoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterMongoDBRole `json:"items"` +} + +func init() { + v1.SchemeBuilder.Register(&ClusterMongoDBRole{}, &ClusterMongoDBRoleList{}) +} diff --git a/api/v1/role/doc.go b/api/v1/role/doc.go new file mode 100644 index 000000000..1d0417c6a --- /dev/null +++ b/api/v1/role/doc.go @@ -0,0 +1,4 @@ +package role + +// +k8s:deepcopy-gen=package +// +versionName=v1 diff --git a/api/v1/role/groupversion_info.go b/api/v1/role/groupversion_info.go new file mode 100644 index 000000000..a1a8f98da --- /dev/null +++ b/api/v1/role/groupversion_info.go @@ -0,0 +1,20 @@ +// Package v1 contains API Schema definitions for the mongodb v1 API group +// +kubebuilder:object:generate=true +// +groupName=mongodb.com +package role + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "mongodb.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1/role/rolebuilder.go b/api/v1/role/rolebuilder.go new file mode 100644 index 000000000..a85c9876e --- /dev/null +++ b/api/v1/role/rolebuilder.go @@ -0,0 +1,67 @@ +package role + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" +) + +type ClusterMongoDBRoleBuilder struct { + name string + finalizers []string + annotations map[string]string + mongoDBRole mdb.MongoDBRole +} + +func DefaultClusterMongoDBRoleBuilder() *ClusterMongoDBRoleBuilder { + return &ClusterMongoDBRoleBuilder{ + name: "default-role", + finalizers: []string{}, + mongoDBRole: mdb.MongoDBRole{ + Role: "default-role", + AuthenticationRestrictions: nil, + Db: "admin", + Privileges: nil, + Roles: []mdb.InheritedRole{ + { + Role: "readWrite", + Db: "admin", + }, + }, + }, + annotations: map[string]string{}, + } +} + +func (b *ClusterMongoDBRoleBuilder) SetName(name string) *ClusterMongoDBRoleBuilder { + b.name = name + return b +} + +func (b *ClusterMongoDBRoleBuilder) AddFinalizer(finalizer string) *ClusterMongoDBRoleBuilder { + b.finalizers = append(b.finalizers, finalizer) + return b +} + +func (b *ClusterMongoDBRoleBuilder) SetMongoDBRole(role mdb.MongoDBRole) *ClusterMongoDBRoleBuilder { + b.mongoDBRole = role + return b +} + +func (b *ClusterMongoDBRoleBuilder) AddAnnotation(key, value string) *ClusterMongoDBRoleBuilder { + b.annotations[key] = value + return b +} + +func (b *ClusterMongoDBRoleBuilder) Build() *ClusterMongoDBRole { + return &ClusterMongoDBRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.name, + Finalizers: b.finalizers, + Annotations: b.annotations, + }, + Spec: ClusterMongoDBRoleSpec{ + MongoDBRole: b.mongoDBRole, + }, + } +} diff --git a/api/v1/role/zz_generated.deepcopy.go b/api/v1/role/zz_generated.deepcopy.go new file mode 100644 index 000000000..e62671271 --- /dev/null +++ b/api/v1/role/zz_generated.deepcopy.go @@ -0,0 +1,127 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2021. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package role + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMongoDBRole) DeepCopyInto(out *ClusterMongoDBRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMongoDBRole. +func (in *ClusterMongoDBRole) DeepCopy() *ClusterMongoDBRole { + if in == nil { + return nil + } + out := new(ClusterMongoDBRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterMongoDBRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMongoDBRoleBuilder) DeepCopyInto(out *ClusterMongoDBRoleBuilder) { + *out = *in + if in.finalizers != nil { + in, out := &in.finalizers, &out.finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.annotations != nil { + in, out := &in.annotations, &out.annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.mongoDBRole.DeepCopyInto(&out.mongoDBRole) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMongoDBRoleBuilder. +func (in *ClusterMongoDBRoleBuilder) DeepCopy() *ClusterMongoDBRoleBuilder { + if in == nil { + return nil + } + out := new(ClusterMongoDBRoleBuilder) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMongoDBRoleList) DeepCopyInto(out *ClusterMongoDBRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterMongoDBRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMongoDBRoleList. +func (in *ClusterMongoDBRoleList) DeepCopy() *ClusterMongoDBRoleList { + if in == nil { + return nil + } + out := new(ClusterMongoDBRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterMongoDBRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMongoDBRoleSpec) DeepCopyInto(out *ClusterMongoDBRoleSpec) { + *out = *in + in.MongoDBRole.DeepCopyInto(&out.MongoDBRole) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMongoDBRoleSpec. +func (in *ClusterMongoDBRoleSpec) DeepCopy() *ClusterMongoDBRoleSpec { + if in == nil { + return nil + } + out := new(ClusterMongoDBRoleSpec) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/mongodb.com_clustermongodbroles.yaml b/config/crd/bases/mongodb.com_clustermongodbroles.yaml new file mode 100644 index 000000000..9241b7dad --- /dev/null +++ b/config/crd/bases/mongodb.com_clustermongodbroles.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clustermongodbroles.mongodb.com +spec: + group: mongodb.com + names: + kind: ClusterMongoDBRole + listKind: ClusterMongoDBRoleList + plural: clustermongodbroles + shortNames: + - cmdbr + singular: clustermongodbrole + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The time since the MongoDB Custom Role resource was created. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterMongoDBRole is the Schema for the clustermongodbroles + API. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterMongoDBRoleSpec defines the desired state of ClusterMongoDBRole. + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + - resource + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - role + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: {} diff --git a/config/crd/bases/mongodb.com_mongodb.yaml b/config/crd/bases/mongodb.com_mongodb.yaml index bcee049d3..1e93e3116 100644 --- a/config/crd/bases/mongodb.com_mongodb.yaml +++ b/config/crd/bases/mongodb.com_mongodb.yaml @@ -1606,6 +1606,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -1685,6 +1699,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' service: description: |- DEPRECATED please use `spec.statefulSet.spec.serviceName` to provide a custom service name. 
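The `x-kubernetes-validations` block added to the MongoDB CRD above enforces the same constraint at admission time. A sketch of a `spec.security` fragment that the API server would now reject with "At most one of roles or roleRefs can be non-empty" (role and reference names are illustrative):

    security:
      roles:
        - role: rootMonitor
          db: admin
      roleRefs:
        - name: clustermongodbrole-sample
          kind: ClusterMongoDBRole

To pass validation, keep either the inline `roles` entries or the `roleRefs` list, not both.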
diff --git a/config/crd/bases/mongodb.com_mongodbmulticluster.yaml b/config/crd/bases/mongodb.com_mongodbmulticluster.yaml index 7e8a77784..210155079 100644 --- a/config/crd/bases/mongodb.com_mongodbmulticluster.yaml +++ b/config/crd/bases/mongodb.com_mongodbmulticluster.yaml @@ -866,6 +866,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -945,6 +959,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' statefulSet: description: |- StatefulSetConfiguration provides the statefulset override for each of the cluster's statefulset diff --git a/config/crd/bases/mongodb.com_opsmanagers.yaml b/config/crd/bases/mongodb.com_opsmanagers.yaml index 8d1efe649..93e3a94f7 100644 --- a/config/crd/bases/mongodb.com_opsmanagers.yaml +++ b/config/crd/bases/mongodb.com_opsmanagers.yaml @@ -928,6 +928,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -1007,6 +1021,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' service: description: this is an optional service, it will get the name "-svc" in case not provided diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index fca4e8e81..8fde6f8a7 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -8,6 +8,7 @@ resources: - bases/mongodb.com_mongodbmulticluster.yaml - bases/mongodb.com_mongodbsearch.yaml - bases/mongodbcommunity.mongodb.com_mongodbcommunity.yaml +- bases/mongodb.com_clustermongodbroles.yaml # +kubebuilder:scaffold:crdkustomizeresource # the following config is for teaching kustomize how to do kustomization for CRDs. 
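The new CRD is wired into the default kustomize base above, and the manager deployment just below gains a `-watch-resource=clustermongodbroles` argument. Per the release notes earlier in this patch, the same behavior can be switched off when installing via helm; a sketch of the values fragment, assuming the `operator.enableClusterMongoDBRoles` value named there:

    operator:
      enableClusterMongoDBRoles: false

With the feature disabled, resources that set `spec.security.roleRefs` are rejected by the controller (see the ensureRoles error path in common_controller.go later in this diff).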
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 73b9c2d59..268ec2c97 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -30,6 +30,7 @@ spec: - -watch-resource=mongodbusers - -watch-resource=mongodbcommunity - -watch-resource=mongodbsearch + - -watch-resource=clustermongodbroles command: - /usr/local/bin/mongodb-kubernetes-operator resources: diff --git a/config/rbac/operator-roles.yaml b/config/rbac/operator-roles.yaml index b8f1e5f50..cdf589e48 100644 --- a/config/rbac/operator-roles.yaml +++ b/config/rbac/operator-roles.yaml @@ -1,5 +1,18 @@ --- # Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role +rules: + - apiGroups: + - mongodb.com + verbs: + - '*' + resources: + - clustermongodbroles +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml --- # Additional ClusterRole for clusterVersionDetection kind: ClusterRole @@ -29,6 +42,20 @@ rules: - list --- # Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role +subjects: + - kind: ServiceAccount + name: mongodb-kubernetes-operator + namespace: mongodb +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml # ClusterRoleBinding for clusterVersionDetection kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/config/samples/cluster-mongodb-role.yaml b/config/samples/cluster-mongodb-role.yaml new file mode 100644 index 000000000..4542c9cb1 --- /dev/null +++ b/config/samples/cluster-mongodb-role.yaml @@ -0,0 +1,15 @@ +apiVersion: mongodb.com/v1 +kind: ClusterMongoDBRole +metadata: + labels: + app.kubernetes.io/name: mongodb-enterprise + app.kubernetes.io/managed-by: kustomize + name: clustermongodbrole-sample +spec: + role: "rootMonitor" + db: "admin" + roles: + - db: "admin" + role: "root" + - db: "admin" + role: "clusterMonitor" diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 3f2f94737..78e7ad6df 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -4,3 +4,4 @@ resources: - mongodb-user.yaml - mongodb-om.yaml - mongodb-multi.yaml +- cluster-mongodb-role.yaml diff --git a/controllers/om/deployment.go b/controllers/om/deployment.go index 254816648..188de7ff6 100644 --- a/controllers/om/deployment.go +++ b/controllers/om/deployment.go @@ -60,7 +60,7 @@ func init() { gob.Register(tls.Prefer) gob.Register(tls.Allow) gob.Register(tls.Disabled) - gob.Register([]mdbv1.MongoDbRole{}) + gob.Register([]mdbv1.MongoDBRole{}) gob.Register([]automationconfig.MemberOptions{}) } @@ -634,24 +634,24 @@ func (d Deployment) GetNumberOfExcessProcesses(resourceName string) int { return excessProcesses } -func (d Deployment) SetRoles(roles []mdbv1.MongoDbRole) { +func (d Deployment) SetRoles(roles []mdbv1.MongoDBRole) { d["roles"] = roles } -func (d Deployment) GetRoles() []mdbv1.MongoDbRole { +func (d Deployment) GetRoles() []mdbv1.MongoDBRole { roles, ok := d["roles"] if !ok || roles == nil { - return []mdbv1.MongoDbRole{} + return []mdbv1.MongoDBRole{} } rolesBytes, err := json.Marshal(roles) if err != nil { - return []mdbv1.MongoDbRole{} + return 
[]mdbv1.MongoDBRole{} } - var result []mdbv1.MongoDbRole + var result []mdbv1.MongoDBRole if err := json.Unmarshal(rolesBytes, &result); err != nil { - return []mdbv1.MongoDbRole{} + return []mdbv1.MongoDBRole{} } return result diff --git a/controllers/operator/authentication_test.go b/controllers/operator/authentication_test.go index b681f4682..6b47167de 100644 --- a/controllers/operator/authentication_test.go +++ b/controllers/operator/authentication_test.go @@ -47,7 +47,7 @@ func TestX509CanBeEnabled_WhenThereAreOnlyTlsDeployments_ReplicaSet(t *testing.T addKubernetesTlsResources(ctx, kubeClient, rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient) } @@ -57,7 +57,7 @@ func TestX509ClusterAuthentication_CanBeEnabled_IfX509AuthenticationIsEnabled_Re kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) addKubernetesTlsResources(ctx, kubeClient, rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient) } @@ -90,7 +90,7 @@ func TestUpdateOmAuthentication_NoAuthenticationEnabled(t *testing.T) { processNames := []string{"my-rs-0", "my-rs-1", "my-rs-2"} kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) r.updateOmAuthentication(ctx, conn, processNames, rs, "", "", "", false, zap.S()) ac, _ := conn.ReadAutomationConfig() @@ -111,7 +111,7 @@ func TestUpdateOmAuthentication_EnableX509_TlsNotEnabled(t *testing.T) { rs.Spec.Security.TLSConfig.Enabled = true kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, conn, []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) assert.True(t, status.IsOK(), "configuring both options at once should not result in a failed status") @@ -123,7 +123,7 @@ func TestUpdateOmAuthentication_EnableX509_WithTlsAlreadyEnabled(t *testing.T) { rs := DefaultReplicaSetBuilder().SetName("my-rs").SetMembers(3).EnableTLS().Build() omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs))) kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) assert.True(t, status.IsOK(), "configuring x509 when tls 
has already been enabled should not result in a failed status") @@ -138,7 +138,7 @@ func TestUpdateOmAuthentication_AuthenticationIsNotConfigured_IfAuthIsNotSet(t * omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs))) kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) status, _ := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) assert.True(t, status.IsOK(), "no authentication should have been configured") @@ -161,7 +161,7 @@ func TestUpdateOmAuthentication_DoesNotDisableAuth_IfAuthIsNotSet(t *testing.T) Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) addKubernetesTlsResources(ctx, kubeClient, rs) @@ -174,7 +174,7 @@ func TestUpdateOmAuthentication_DoesNotDisableAuth_IfAuthIsNotSet(t *testing.T) rs.Spec.Security.Authentication = nil - reconciler = newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler = newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient) @@ -196,7 +196,7 @@ func TestCanConfigureAuthenticationDisabled_WithNoModes(t *testing.T) { Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) addKubernetesTlsResources(ctx, kubeClient, rs) @@ -208,7 +208,7 @@ func TestUpdateOmAuthentication_EnableX509_FromEmptyDeployment(t *testing.T) { rs := DefaultReplicaSetBuilder().SetName("my-rs").SetMembers(3).EnableTLS().EnableAuth().EnableX509().Build() omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(om.NewDeployment())) kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) createAgentCSRs(t, ctx, 1, r.client, certsv1.CertificateApproved) status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S()) @@ -228,7 +228,7 @@ func TestX509AgentUserIsCorrectlyConfigured(t *testing.T) { // configure x509/tls resources addKubernetesTlsResources(ctx, kubeClient, rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, 
reconciler, rs, kubeClient) @@ -264,7 +264,7 @@ func TestScramAgentUserIsCorrectlyConfigured(t *testing.T) { assert.NoError(t, err) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient) @@ -294,7 +294,7 @@ func TestScramAgentUser_IsNotOverridden(t *testing.T) { } }) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient) @@ -313,7 +313,7 @@ func TestX509InternalClusterAuthentication_CanBeEnabledWithScram_ReplicaSet(t *t Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) addKubernetesTlsResources(ctx, r.client, rs) checkReconcileSuccessful(ctx, t, r, rs, kubeClient) @@ -366,7 +366,7 @@ func TestConfigureLdapDeploymentAuthentication_WithScramAgentAuthentication(t *t Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) data := map[string]string{ "password": "LITZTOd6YiCV8j", } @@ -393,7 +393,7 @@ func TestConfigureLdapDeploymentAuthentication_WithScramAgentAuthentication(t *t func TestConfigureLdapDeploymentAuthentication_WithCustomRole(t *testing.T) { ctx := context.Background() - customRoles := []mdbv1.MongoDbRole{ + customRoles := []mdbv1.MongoDBRole{ { Db: "admin", Role: "customRole", @@ -423,7 +423,7 @@ func TestConfigureLdapDeploymentAuthentication_WithCustomRole(t *testing.T) { Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) data := map[string]string{ "password": "LITZTOd6YiCV8j", } @@ -440,7 +440,7 @@ func TestConfigureLdapDeploymentAuthentication_WithCustomRole(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "server0:1234", ac.Ldap.Servers) - roles := ac.Deployment["roles"].([]mdbv1.MongoDbRole) + roles := ac.Deployment["roles"].([]mdbv1.MongoDBRole) assert.Len(t, roles, 1) assert.Equal(t, customRoles, roles) } @@ -477,7 +477,7 @@ func TestConfigureLdapDeploymentAuthentication_WithAuthzQueryTemplate_AndUserToD Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) data := map[string]string{ "password": "LITZTOd6YiCV8j", } @@ -740,7 +740,7 @@ func TestInvalidPEM_SecretDoesNotContainKey(t *testing.T) { Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", 
false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) addKubernetesTlsResources(ctx, kubeClient, rs) // Replace the secret with an empty one @@ -795,7 +795,7 @@ func Test_NoExternalDomainPresent(t *testing.T) { rs.Spec.ExternalAccessConfiguration = &mdbv1.ExternalAccessConfiguration{ExternalDomain: ptr.To("foo")} kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc) addKubernetesTlsResources(ctx, kubeClient, rs) secret := &corev1.Secret{} diff --git a/controllers/operator/common_controller.go b/controllers/operator/common_controller.go index b4a289510..3fb753d47 100644 --- a/controllers/operator/common_controller.go +++ b/controllers/operator/common_controller.go @@ -23,6 +23,7 @@ import ( v1 "github.com/mongodb/mongodb-kubernetes/api/v1" mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" "github.com/mongodb/mongodb-kubernetes/api/v1/status" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/backup" @@ -97,7 +98,31 @@ func NewReconcileCommonController(ctx context.Context, client client.Client) *Re } } -func ensureRoles(roles []mdbv1.MongoDbRole, conn om.Connection, log *zap.SugaredLogger) workflow.Status { +// ensureRoles will first check if both roles and roleRefs are populated. If both are, it will return an error, which is inline with the webhook validation rules. +// Otherwise, if roles is populated, then it will extract the list of roles and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. +// If roleRefs is populated, it will extract the list of roles from the referenced resources and check if they are already set in Ops Manager. If they are not, it will update the roles in Ops Manager. +func (r *ReconcileCommonController) ensureRoles(ctx context.Context, db mdbv1.DbCommonSpec, enableClusterMongoDBRoles bool, conn om.Connection, mongodbResourceNsName types.NamespacedName, log *zap.SugaredLogger) workflow.Status { + localRoles := db.GetSecurity().Roles + roleRefs := db.GetSecurity().RoleRefs + + if len(localRoles) > 0 && len(roleRefs) > 0 { + return workflow.Failed(xerrors.Errorf("At most one one of roles or roleRefs can be non-empty.")) + } + + var roles []mdbv1.MongoDBRole + if len(roleRefs) > 0 { + if !enableClusterMongoDBRoles { + return workflow.Failed(xerrors.Errorf("RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration. This can be done by setting the operator.enableClusterMongoDBRoles to true in the helm values file, which will automatically installed the necessary RBAC. 
Alternatively, it can be enabled by adding -watch-resource=clustermongodbroles flag to the operator deployment, and manually creating the necessary RBAC.")) + } + var err error + roles, err = r.getRoleRefs(ctx, roleRefs, mongodbResourceNsName, db.Version) + if err != nil { + return workflow.Failed(err) + } + } else { + roles = localRoles + } + d, err := conn.ReadDeployment() if err != nil { return workflow.Failed(err) @@ -114,6 +139,8 @@ func ensureRoles(roles []mdbv1.MongoDbRole, conn om.Connection, log *zap.Sugared roles[i].Privileges = []mdbv1.Privilege{} } } + + log.Infof("Roles have been changed. Updating deployment in Ops Manager.") err = conn.ReadUpdateDeployment( func(d om.Deployment) error { d.SetRoles(roles) @@ -127,6 +154,45 @@ func ensureRoles(roles []mdbv1.MongoDbRole, conn om.Connection, log *zap.Sugared return workflow.OK() } +// getRoleRefs retrieves the roles from the referenced resources. It will return an error if any of the referenced resources are not found. +// It will also add the referenced resources to the resource watcher, so that they are watched for changes. +// The referenced resources are expected to be of kind ClusterMongoDBRole. +// This implementation is prepared for a future namespaced variant of ClusterMongoDBRole. +func (r *ReconcileCommonController) getRoleRefs(ctx context.Context, roleRefs []mdbv1.MongoDBRoleRef, mongodbResourceNsName types.NamespacedName, mdbVersion string) ([]mdbv1.MongoDBRole, error) { + roles := make([]mdbv1.MongoDBRole, len(roleRefs)) + + for idx, ref := range roleRefs { + var role mdbv1.MongoDBRole + switch ref.Kind { + + case util.ClusterMongoDBRoleKind: + customRole := &rolev1.ClusterMongoDBRole{} + + err := r.client.Get(ctx, types.NamespacedName{Name: ref.Name}, customRole) + if err != nil { + if apiErrors.IsNotFound(err) { + return nil, xerrors.Errorf("ClusterMongoDBRole '%s' not found. If the resource was deleted, the role is still present in MongoDB. To correctly remove a role from MongoDB, please remove the reference from spec.security.roleRefs.", ref.Name) + } + return nil, xerrors.Errorf("Failed to retrieve ClusterMongoDBRole '%s': %w", ref.Name, err) + } + + if res := mdbv1.RoleIsCorrectlyConfigured(customRole.Spec.MongoDBRole, mdbVersion); res.Level == v1.ErrorLevel { + return nil, xerrors.Errorf("Error validating role '%s' - %s", ref.Name, res.Msg) + } + + r.resourceWatcher.AddWatchedResourceIfNotAdded(ref.Name, "", watch.ClusterMongoDBRole, mongodbResourceNsName) + role = customRole.Spec.MongoDBRole + + default: + return nil, xerrors.Errorf("Invalid value %s for roleRef.kind. It must be %s.", ref.Kind, util.ClusterMongoDBRoleKind) + } + + roles[idx] = role + } + + return roles, nil +} + // updateStatus updates the status for the CR using patch operation. Note, that the resource status is mutated and // it's important to pass resource by pointer to all methods which invoke current 'updateStatus'. 
func (r *ReconcileCommonController) updateStatus(ctx context.Context, reconciledResource v1.CustomResourceReadWriter, st workflow.Status, log *zap.SugaredLogger, statusOptions ...status.Option) (reconcile.Result, error) { diff --git a/controllers/operator/common_controller_test.go b/controllers/operator/common_controller_test.go index 2d03b9269..da0928f67 100644 --- a/controllers/operator/common_controller_test.go +++ b/controllers/operator/common_controller_test.go @@ -23,6 +23,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" omv1 "github.com/mongodb/mongodb-kubernetes/api/v1/om" + "github.com/mongodb/mongodb-kubernetes/api/v1/role" "github.com/mongodb/mongodb-kubernetes/api/v1/status" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/deployment" @@ -265,9 +266,124 @@ func TestReadSubjectNoCertificate(t *testing.T) { assertSubjectFromFileFails(t, "testdata/certificates/just_key") } +func TestFailWhenRoleAndRoleRefsAreConfigured(t *testing.T) { + ctx := context.Background() + customRole := mdbv1.MongoDBRole{ + Role: "foo", + AuthenticationRestrictions: []mdbv1.AuthenticationRestriction{}, + Db: "admin", + Roles: []mdbv1.InheritedRole{{ + Db: "admin", + Role: "readWriteAnyDatabase", + }}, + } + roleResource := role.DefaultClusterMongoDBRoleBuilder().Build() + roleRef := mdbv1.MongoDBRoleRef{ + Name: roleResource.Name, + Kind: util.ClusterMongoDBRoleKind, + } + assert.Nil(t, customRole.Privileges) + rs := mdbv1.NewDefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDBRole{customRole}).SetRoleRefs([]mdbv1.MongoDBRoleRef{roleRef}).Build() + + kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() + controller := NewReconcileCommonController(ctx, kubeClient) + mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) + + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + assert.False(t, result.IsOK()) + assert.Equal(t, status.PhaseFailed, result.Phase()) + + ac, err := mockOm.ReadAutomationConfig() + assert.NoError(t, err) + roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) + assert.False(t, ok) + assert.Empty(t, roles) +} + +func TestRoleRefsAreAdded(t *testing.T) { + ctx := context.Background() + roleResource := role.DefaultClusterMongoDBRoleBuilder().Build() + roleRefs := []mdbv1.MongoDBRoleRef{ + { + Name: roleResource.Name, + Kind: util.ClusterMongoDBRoleKind, + }, + } + rs := mdbv1.NewDefaultReplicaSetBuilder().SetRoleRefs(roleRefs).Build() + + kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() + controller := NewReconcileCommonController(ctx, kubeClient) + mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) + + _ = kubeClient.Create(ctx, roleResource) + + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + + ac, err := mockOm.ReadAutomationConfig() + assert.NoError(t, err) + roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) + assert.True(t, ok) + assert.NotNil(t, roles[0].Privileges) + assert.Len(t, roles, 1) +} + +func TestErrorWhenRoleRefIsWrong(t *testing.T) { + ctx := context.Background() + roleResource := role.DefaultClusterMongoDBRoleBuilder().Build() + roleRefs := []mdbv1.MongoDBRoleRef{ + { + Name: roleResource.Name, + Kind: "WrongMongoDBRoleReference", + }, + } + rs := mdbv1.NewDefaultReplicaSetBuilder().SetRoleRefs(roleRefs).Build() + + kubeClient, omConnectionFactory := 
mock.NewDefaultFakeClient() + controller := NewReconcileCommonController(ctx, kubeClient) + mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) + + _ = kubeClient.Create(ctx, roleResource) + + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + assert.False(t, result.IsOK()) + assert.Equal(t, status.PhaseFailed, result.Phase()) + + ac, err := mockOm.ReadAutomationConfig() + assert.NoError(t, err) + roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) + assert.False(t, ok) + assert.Empty(t, roles) +} + +func TestErrorWhenRoleDoesNotExist(t *testing.T) { + ctx := context.Background() + roleResource := role.DefaultClusterMongoDBRoleBuilder().Build() + roleRefs := []mdbv1.MongoDBRoleRef{ + { + Name: roleResource.Name, + Kind: util.ClusterMongoDBRoleKind, + }, + } + rs := mdbv1.NewDefaultReplicaSetBuilder().SetRoleRefs(roleRefs).Build() + + kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() + controller := NewReconcileCommonController(ctx, kubeClient) + mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) + + result := controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) + assert.False(t, result.IsOK()) + assert.Equal(t, status.PhaseFailed, result.Phase()) + + ac, err := mockOm.ReadAutomationConfig() + assert.NoError(t, err) + roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) + assert.False(t, ok) + assert.Empty(t, roles) +} + func TestDontSendNilPrivileges(t *testing.T) { ctx := context.Background() - customRole := mdbv1.MongoDbRole{ + customRole := mdbv1.MongoDBRole{ Role: "foo", AuthenticationRestrictions: []mdbv1.AuthenticationRestriction{}, Db: "admin", @@ -277,14 +393,14 @@ func TestDontSendNilPrivileges(t *testing.T) { }}, } assert.Nil(t, customRole.Privileges) - rs := DefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDbRole{customRole}).Build() + rs := DefaultReplicaSetBuilder().SetRoles([]mdbv1.MongoDBRole{customRole}).Build() kubeClient, omConnectionFactory := mock.NewDefaultFakeClient() controller := NewReconcileCommonController(ctx, kubeClient) mockOm, _ := prepareConnection(ctx, controller, omConnectionFactory.GetConnectionFunc, t) - ensureRoles(rs.Spec.Security.Roles, mockOm, &zap.SugaredLogger{}) + controller.ensureRoles(ctx, rs.Spec.DbCommonSpec, true, mockOm, kube.ObjectKeyFromApiObject(rs), zap.S()) ac, err := mockOm.ReadAutomationConfig() assert.NoError(t, err) - roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDbRole) + roles, ok := ac.Deployment["roles"].([]mdbv1.MongoDBRole) assert.True(t, ok) assert.NotNil(t, roles[0].Privileges) } diff --git a/controllers/operator/construct/multicluster/multicluster_replicaset_test.go b/controllers/operator/construct/multicluster/multicluster_replicaset_test.go index 218a5ef9c..8086ed554 100644 --- a/controllers/operator/construct/multicluster/multicluster_replicaset_test.go +++ b/controllers/operator/construct/multicluster/multicluster_replicaset_test.go @@ -44,7 +44,7 @@ func getMultiClusterMongoDB() mdbmulti.MongoDBMultiCluster { Authentication: &mdb.Authentication{ Modes: []mdb.AuthMode{}, }, - Roles: []mdb.MongoDbRole{}, + Roles: []mdb.MongoDBRole{}, }, }, ClusterSpecList: mdb.ClusterSpecList{ diff --git a/controllers/operator/mock/mockedkubeclient.go b/controllers/operator/mock/mockedkubeclient.go index c2b972651..7bc1d60d8 100644 --- a/controllers/operator/mock/mockedkubeclient.go +++ 
b/controllers/operator/mock/mockedkubeclient.go @@ -23,6 +23,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" "github.com/mongodb/mongodb-kubernetes/api/v1/mdbmulti" omv1 "github.com/mongodb/mongodb-kubernetes/api/v1/om" + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" searchv1 "github.com/mongodb/mongodb-kubernetes/api/v1/search" "github.com/mongodb/mongodb-kubernetes/api/v1/user" "github.com/mongodb/mongodb-kubernetes/controllers/om" @@ -104,7 +105,12 @@ func NewEmptyFakeClientBuilder() *fake.ClientBuilder { return nil } - builder.WithStatusSubresource(&mdbv1.MongoDB{}, &mdbmulti.MongoDBMultiCluster{}, &omv1.MongoDBOpsManager{}, &user.MongoDBUser{}, &searchv1.MongoDBSearch{}, &mdbcv1.MongoDBCommunity{}) + err = rolev1.AddToScheme(s) + if err != nil { + return nil + } + + builder.WithStatusSubresource(&mdbv1.MongoDB{}, &mdbmulti.MongoDBMultiCluster{}, &omv1.MongoDBOpsManager{}, &user.MongoDBUser{}, &searchv1.MongoDBSearch{}, &mdbcv1.MongoDBCommunity{}, &rolev1.ClusterMongoDBRole{}) ot := testing.NewObjectTracker(s, scheme.Codecs.UniversalDecoder()) return builder.WithScheme(s).WithObjectTracker(ot) diff --git a/controllers/operator/mongodbmultireplicaset_controller.go b/controllers/operator/mongodbmultireplicaset_controller.go index caebe414c..11d2daeb2 100644 --- a/controllers/operator/mongodbmultireplicaset_controller.go +++ b/controllers/operator/mongodbmultireplicaset_controller.go @@ -33,6 +33,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" mdbmultiv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdbmulti" omv1 "github.com/mongodb/mongodb-kubernetes/api/v1/om" + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" mdbstatus "github.com/mongodb/mongodb-kubernetes/api/v1/status" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/host" @@ -80,6 +81,7 @@ type ReconcileMongoDbMultiReplicaSet struct { memberClusterClientsMap map[string]kubernetesClient.Client // holds the client for each of the memberclusters(where the MongoDB ReplicaSet is deployed) memberClusterSecretClientsMap map[string]secrets.SecretClient forceEnterprise bool + enableClusterMongoDBRoles bool imageUrls images.ImageUrls initDatabaseNonStaticImageVersion string @@ -88,7 +90,7 @@ type ReconcileMongoDbMultiReplicaSet struct { var _ reconcile.Reconciler = &ReconcileMongoDbMultiReplicaSet{} -func newMultiClusterReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, omFunc om.ConnectionFactory, memberClustersMap map[string]client.Client) *ReconcileMongoDbMultiReplicaSet { +func newMultiClusterReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory, memberClustersMap map[string]client.Client) *ReconcileMongoDbMultiReplicaSet { clientsMap := make(map[string]kubernetesClient.Client) secretClientsMap := make(map[string]secrets.SecretClient) @@ -110,6 +112,7 @@ func newMultiClusterReplicaSetReconciler(ctx context.Context, kubeClient client. 
imageUrls: imageUrls, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion: databaseNonStaticImageVersion, + enableClusterMongoDBRoles: enableClusterMongoDBRoles, } } @@ -360,7 +363,7 @@ func (r *ReconcileMongoDbMultiReplicaSet) reconcileMemberResources(ctx context.C } } // Ensure custom roles are created in OM - if status := ensureRoles(mrs.GetSecurity().Roles, conn, log); !status.IsOK() { + if status := r.ensureRoles(ctx, mrs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(mrs), log); !status.IsOK() { return status } @@ -1072,9 +1075,9 @@ func (r *ReconcileMongoDbMultiReplicaSet) reconcileOMCAConfigMap(ctx context.Con // AddMultiReplicaSetController creates a new MongoDbMultiReplicaset Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. -func AddMultiReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, memberClustersMap map[string]cluster.Cluster) error { +func AddMultiReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClustersMap map[string]cluster.Cluster) error { // Create a new controller - reconciler := newMultiClusterReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, om.NewOpsManagerConnection, multicluster.ClustersMapToClientMap(memberClustersMap)) + reconciler := newMultiClusterReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, om.NewOpsManagerConnection, multicluster.ClustersMapToClientMap(memberClustersMap)) c, err := controller.New(util.MongoDbMultiClusterController, mgr, controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)}) // nolint:forbidigo if err != nil { return err @@ -1113,6 +1116,14 @@ func AddMultiReplicaSetController(ctx context.Context, mgr manager.Manager, imag return err } + if enableClusterMongoDBRoles { + err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &rolev1.ClusterMongoDBRole{}, + &watch.ResourcesHandler{ResourceType: watch.ClusterMongoDBRole, ResourceWatcher: reconciler.resourceWatcher})) + if err != nil { + return err + } + } + // register watcher across member clusters for k, v := range memberClustersMap { err := c.Watch(source.Kind[client.Object](v.GetCache(), &appsv1.StatefulSet{}, &khandler.EnqueueRequestForOwnerMultiCluster{}, watch.PredicatesForMultiStatefulSet())) diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go index 730a402a4..fb9819283 100644 --- a/controllers/operator/mongodbmultireplicaset_controller_test.go +++ b/controllers/operator/mongodbmultireplicaset_controller_test.go @@ -758,7 +758,7 @@ func TestMultiReplicaSetRace(t *testing.T) { omConnectionFactory := om.NewDefaultCachedOMConnectionFactory().WithResourceToProjectMapping(resourceToProjectMapping) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(clusters, omConnectionFactory, true, true) - reconciler := newMultiClusterReplicaSetReconciler(ctx, 
fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc, memberClusterMap) + reconciler := newMultiClusterReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc, memberClusterMap) testConcurrentReconciles(ctx, t, fakeClient, reconciler, rs1, rs2, rs3) } @@ -1430,7 +1430,7 @@ func calculateHostNamesForExternalDomains(m *mdbmulti.MongoDBMultiCluster) []str func multiReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, m *mdbmulti.MongoDBMultiCluster) (*ReconcileMongoDbMultiReplicaSet, kubernetesClient.Client, map[string]client.Client, *om.CachedOMConnectionFactory) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(m) memberClusterMap := getFakeMultiClusterMap(omConnectionFactory) - return newMultiClusterReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, omConnectionFactory.GetConnectionFunc, memberClusterMap), kubeClient, memberClusterMap, omConnectionFactory + return newMultiClusterReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, omConnectionFactory.GetConnectionFunc, memberClusterMap), kubeClient, memberClusterMap, omConnectionFactory } func getFakeMultiClusterMap(omConnectionFactory *om.CachedOMConnectionFactory) map[string]client.Client { diff --git a/controllers/operator/mongodbreplicaset_controller.go b/controllers/operator/mongodbreplicaset_controller.go index 028753228..c16ed89c6 100644 --- a/controllers/operator/mongodbreplicaset_controller.go +++ b/controllers/operator/mongodbreplicaset_controller.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" mdbstatus "github.com/mongodb/mongodb-kubernetes/api/v1/status" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/backup" @@ -45,6 +46,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/util/scale" "github.com/mongodb/mongodb-kubernetes/pkg/dns" "github.com/mongodb/mongodb-kubernetes/pkg/images" + "github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/mongodb/mongodb-kubernetes/pkg/statefulset" "github.com/mongodb/mongodb-kubernetes/pkg/util" "github.com/mongodb/mongodb-kubernetes/pkg/util/architectures" @@ -57,9 +59,10 @@ import ( // ReconcileMongoDbReplicaSet reconciles a MongoDB with a type of ReplicaSet type ReconcileMongoDbReplicaSet struct { *ReconcileCommonController - omConnectionFactory om.ConnectionFactory - imageUrls images.ImageUrls - forceEnterprise bool + omConnectionFactory om.ConnectionFactory + imageUrls images.ImageUrls + forceEnterprise bool + enableClusterMongoDBRoles bool initDatabaseNonStaticImageVersion string databaseNonStaticImageVersion string @@ -67,12 +70,13 @@ type ReconcileMongoDbReplicaSet struct { var _ reconcile.Reconciler = &ReconcileMongoDbReplicaSet{} -func newReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, omFunc om.ConnectionFactory) *ReconcileMongoDbReplicaSet { +func 
newReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbReplicaSet { return &ReconcileMongoDbReplicaSet{ ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient), omConnectionFactory: omFunc, imageUrls: imageUrls, forceEnterprise: forceEnterprise, + enableClusterMongoDBRoles: enableClusterMongoDBRoles, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion: databaseNonStaticImageVersion, @@ -216,7 +220,7 @@ func (r *ReconcileMongoDbReplicaSet) Reconcile(ctx context.Context, request reco } sts := construct.DatabaseStatefulSet(*rs, rsConfig, log) - if status := ensureRoles(rs.Spec.GetSecurity().Roles, conn, log); !status.IsOK() { + if status := r.ensureRoles(ctx, rs.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(rs), log); !status.IsOK() { return r.updateStatus(ctx, rs, status, log) } @@ -345,9 +349,9 @@ func (r *ReconcileMongoDbReplicaSet) reconcileHostnameOverrideConfigMap(ctx cont // AddReplicaSetController creates a new MongoDbReplicaset Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. -func AddReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool) error { +func AddReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool) error { // Create a new controller - reconciler := newReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, om.NewOpsManagerConnection) + reconciler := newReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, om.NewOpsManagerConnection) c, err := controller.New(util.MongoDbReplicaSetController, mgr, controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)}) // nolint:forbidigo if err != nil { return err @@ -386,6 +390,14 @@ func AddReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls return err } + if enableClusterMongoDBRoles { + err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &rolev1.ClusterMongoDBRole{}, + &watch.ResourcesHandler{ResourceType: watch.ClusterMongoDBRole, ResourceWatcher: reconciler.resourceWatcher})) + if err != nil { + return err + } + } + // if vault secret backend is enabled watch for Vault secret change and trigger reconcile if vault.IsVaultSecretBackend() { eventChannel := make(chan event.GenericEvent) diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go index 107091d88..4c9abb64c 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -92,7 +92,7 @@ func TestReplicaSetRace(t *testing.T) { Get: mock.GetFakeClientInterceptorGetFunc(omConnectionFactory, true, true), }).Build() - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, 
"fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) testConcurrentReconciles(ctx, t, fakeClient, reconciler, rs, rs2, rs3) } @@ -410,7 +410,7 @@ func TestCreateDeleteReplicaSet(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, fakeClient) omConn := omConnectionFactory.GetConnection() @@ -549,7 +549,7 @@ func TestFeatureControlPolicyAndTagAddedWithNewerOpsManager(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, fakeClient) @@ -573,7 +573,7 @@ func TestFeatureControlPolicyNoAuthNewerOpsManager(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, fakeClient) @@ -983,7 +983,7 @@ func assertCorrectNumberOfMembersAndProcesses(ctx context.Context, t *testing.T, func defaultReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, rs *mdbv1.MongoDB) (*ReconcileMongoDbReplicaSet, kubernetesClient.Client, *om.CachedOMConnectionFactory) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs) - return newReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory + return newReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory } // newDefaultPodSpec creates pod spec with default values,sets only the topology key and persistence sizes, @@ -1020,7 +1020,7 @@ 
func DefaultReplicaSetBuilder() *ReplicaSetBuilder { Security: &mdbv1.Security{ TLSConfig: &mdbv1.TLSConfig{}, Authentication: &mdbv1.Authentication{}, - Roles: []mdbv1.MongoDbRole{}, + Roles: []mdbv1.MongoDBRole{}, }, }, Members: 3, @@ -1073,7 +1073,7 @@ func (b *ReplicaSetBuilder) SetAuthentication(auth *mdbv1.Authentication) *Repli return b } -func (b *ReplicaSetBuilder) SetRoles(roles []mdbv1.MongoDbRole) *ReplicaSetBuilder { +func (b *ReplicaSetBuilder) SetRoles(roles []mdbv1.MongoDBRole) *ReplicaSetBuilder { if b.Spec.Security == nil { b.Spec.Security = &mdbv1.Security{} } diff --git a/controllers/operator/mongodbshardedcluster_controller.go b/controllers/operator/mongodbshardedcluster_controller.go index 07c6ea2a0..fb98984ef 100644 --- a/controllers/operator/mongodbshardedcluster_controller.go +++ b/controllers/operator/mongodbshardedcluster_controller.go @@ -30,6 +30,7 @@ import ( mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" omv1 "github.com/mongodb/mongodb-kubernetes/api/v1/om" + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" mdbstatus "github.com/mongodb/mongodb-kubernetes/api/v1/status" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/backup" @@ -75,22 +76,24 @@ import ( // ReconcileMongoDbShardedCluster is the reconciler for the sharded cluster type ReconcileMongoDbShardedCluster struct { *ReconcileCommonController - omConnectionFactory om.ConnectionFactory - memberClustersMap map[string]client.Client - imageUrls images.ImageUrls - forceEnterprise bool + omConnectionFactory om.ConnectionFactory + memberClustersMap map[string]client.Client + imageUrls images.ImageUrls + forceEnterprise bool + enableClusterMongoDBRoles bool initDatabaseNonStaticImageVersion string databaseNonStaticImageVersion string } -func newShardedClusterReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, memberClusterMap map[string]client.Client, omFunc om.ConnectionFactory) *ReconcileMongoDbShardedCluster { +func newShardedClusterReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClusterMap map[string]client.Client, omFunc om.ConnectionFactory) *ReconcileMongoDbShardedCluster { return &ReconcileMongoDbShardedCluster{ ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient), omConnectionFactory: omFunc, memberClustersMap: memberClusterMap, forceEnterprise: forceEnterprise, imageUrls: imageUrls, + enableClusterMongoDBRoles: enableClusterMongoDBRoles, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion: databaseNonStaticImageVersion, @@ -625,11 +628,12 @@ func processClusterSpecList( } type ShardedClusterReconcileHelper struct { - commonController *ReconcileCommonController - omConnectionFactory om.ConnectionFactory - imageUrls images.ImageUrls - forceEnterprise bool - automationAgentVersion string + commonController *ReconcileCommonController + omConnectionFactory om.ConnectionFactory + imageUrls images.ImageUrls + forceEnterprise bool + enableClusterMongoDBRoles bool + automationAgentVersion string initDatabaseNonStaticImageVersion string databaseNonStaticImageVersion string @@ -667,7 +671,7 @@ func NewReadOnlyClusterReconcilerHelper( globalMemberClustersMap 
map[string]client.Client, log *zap.SugaredLogger, ) (*ShardedClusterReconcileHelper, error) { - return newShardedClusterReconcilerHelper(ctx, reconciler, nil, "", "", false, + return newShardedClusterReconcilerHelper(ctx, reconciler, nil, "", "", false, false, sc, globalMemberClustersMap, nil, log, true) } @@ -678,13 +682,14 @@ func NewShardedClusterReconcilerHelper( initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, + enableClusterMongoDBRoles bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, omConnectionFactory om.ConnectionFactory, log *zap.SugaredLogger, ) (*ShardedClusterReconcileHelper, error) { return newShardedClusterReconcilerHelper(ctx, reconciler, imageUrls, initDatabaseNonStaticImageVersion, - databaseNonStaticImageVersion, forceEnterprise, sc, globalMemberClustersMap, omConnectionFactory, log, false) + databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, sc, globalMemberClustersMap, omConnectionFactory, log, false) } func newShardedClusterReconcilerHelper( @@ -694,6 +699,7 @@ func newShardedClusterReconcilerHelper( initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, + enableClusterMongoDBRoles bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, omConnectionFactory om.ConnectionFactory, @@ -708,10 +714,11 @@ func newShardedClusterReconcilerHelper( globalMemberClustersMap = multicluster.InitializeGlobalMemberClusterMapForSingleCluster(globalMemberClustersMap, reconciler.client) helper := &ShardedClusterReconcileHelper{ - commonController: reconciler, - omConnectionFactory: omConnectionFactory, - imageUrls: imageUrls, - forceEnterprise: forceEnterprise, + commonController: reconciler, + omConnectionFactory: omConnectionFactory, + imageUrls: imageUrls, + forceEnterprise: forceEnterprise, + enableClusterMongoDBRoles: enableClusterMongoDBRoles, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion: databaseNonStaticImageVersion, @@ -842,7 +849,7 @@ func (r *ReconcileMongoDbShardedCluster) Reconcile(ctx context.Context, request return reconcileResult, err } - reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.forceEnterprise, sc, r.memberClustersMap, r.omConnectionFactory, log) + reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.forceEnterprise, r.enableClusterMongoDBRoles, sc, r.memberClustersMap, r.omConnectionFactory, log) if err != nil { return r.updateStatus(ctx, sc, workflow.Failed(xerrors.Errorf("Failed to initialize sharded cluster reconciler: %w", err)), log) } @@ -851,7 +858,7 @@ func (r *ReconcileMongoDbShardedCluster) Reconcile(ctx context.Context, request // OnDelete tries to complete a Deletion reconciliation event func (r *ReconcileMongoDbShardedCluster) OnDelete(ctx context.Context, obj runtime.Object, log *zap.SugaredLogger) error { - reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.forceEnterprise, obj.(*mdbv1.MongoDB), r.memberClustersMap, r.omConnectionFactory, log) + reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, 
r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.forceEnterprise, r.enableClusterMongoDBRoles, obj.(*mdbv1.MongoDB), r.memberClustersMap, r.omConnectionFactory, log) if err != nil { return err } @@ -1073,7 +1080,7 @@ func (r *ShardedClusterReconcileHelper) doShardedClusterProcessing(ctx context.C } } - if workflowStatus := ensureRoles(sc.Spec.GetSecurity().Roles, conn, log); !workflowStatus.IsOK() { + if workflowStatus := r.commonController.ensureRoles(ctx, sc.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(sc), log); !workflowStatus.IsOK() { return workflowStatus } @@ -1633,9 +1640,9 @@ func logDiffOfProcessNames(acProcesses []string, healthyProcesses []string, log } } -func AddShardedClusterController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, memberClustersMap map[string]cluster.Cluster) error { +func AddShardedClusterController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClustersMap map[string]cluster.Cluster) error { // Create a new controller - reconciler := newShardedClusterReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, multicluster.ClustersMapToClientMap(memberClustersMap), om.NewOpsManagerConnection) + reconciler := newShardedClusterReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, multicluster.ClustersMapToClientMap(memberClustersMap), om.NewOpsManagerConnection) options := controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)} // nolint:forbidigo c, err := controller.New(util.MongoDbShardedClusterController, mgr, options) if err != nil { @@ -1675,6 +1682,15 @@ func AddShardedClusterController(ctx context.Context, mgr manager.Manager, image zap.S().Errorf("Failed to watch for vault secret changes: %w", err) } } + + if enableClusterMongoDBRoles { + err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &rolev1.ClusterMongoDBRole{}, + &watch.ResourcesHandler{ResourceType: watch.ClusterMongoDBRole, ResourceWatcher: reconciler.resourceWatcher})) + if err != nil { + return err + } + } + zap.S().Infof("Registered controller %s", util.MongoDbShardedClusterController) return nil diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index fb3ca64e1..158665f3b 100644 --- a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -43,8 +43,8 @@ import ( ) func newShardedClusterReconcilerForMultiCluster(ctx context.Context, forceEnterprise bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { - r := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - reconcileHelper, err := 
NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) + r := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) + reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) if err != nil { return nil, nil, err } @@ -1249,7 +1249,7 @@ func TestMultiClusterShardedSetRace(t *testing.T) { globalMemberClustersMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusterNames, omConnectionFactory, true, false) ctx := context.Background() - reconciler := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) allHostnames := generateHostsForCluster(ctx, reconciler, false, sc, mongosDistribution, configSrvDistribution, shardDistribution) allHostnames1 := generateHostsForCluster(ctx, reconciler, false, sc1, mongosDistribution, configSrvDistribution, shardDistribution) @@ -2410,7 +2410,7 @@ func reconcileUntilSuccessful(ctx context.Context, t *testing.T, reconciler reco } func generateHostsForCluster(ctx context.Context, reconciler *ReconcileMongoDbShardedCluster, forceEnterprise bool, sc *mdbv1.MongoDB, mongosDistribution map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) []string { - reconcileHelper, _ := NewShardedClusterReconcilerHelper(ctx, reconciler.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, sc, reconciler.memberClustersMap, reconciler.omConnectionFactory, zap.S()) + reconcileHelper, _ := NewShardedClusterReconcilerHelper(ctx, reconciler.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, reconciler.memberClustersMap, reconciler.omConnectionFactory, zap.S()) allHostnames, _ := generateAllHosts(sc, mongosDistribution, reconcileHelper.deploymentState.ClusterMapping, configSrvDistribution, shardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) return allHostnames } diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index 782b4dd4a..8404ebabb 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -197,7 +197,7 @@ func TestShardedClusterRace(t *testing.T) { WithObjects(mock.GetDefaultResources()...). 
Build() - reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, nil, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, nil, omConnectionFactory.GetConnectionFunc) testConcurrentReconciles(ctx, t, fakeClient, reconciler, sc1, sc2, sc3) } @@ -1052,7 +1052,7 @@ func TestFeatureControlsNoAuth(t *testing.T) { sc := test.DefaultClusterBuilder().RemoveAuth().Build() omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, sc) - reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, nil, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, nil, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, sc, fakeClient) @@ -1253,7 +1253,7 @@ func TestFeatureControlsAuthEnabled(t *testing.T) { sc := test.DefaultClusterBuilder().Build() omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, sc) - reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, nil, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, nil, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, sc, fakeClient) @@ -1690,8 +1690,8 @@ func defaultClusterReconciler(ctx context.Context, imageUrls images.ImageUrls, i } func newShardedClusterReconcilerFromResource(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { - r := newShardedClusterReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) + r := newShardedClusterReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) + reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) if err != nil { return nil, nil, err } diff --git a/controllers/operator/mongodbstandalone_controller.go 
b/controllers/operator/mongodbstandalone_controller.go index 41612848b..1078fd6b8 100644 --- a/controllers/operator/mongodbstandalone_controller.go +++ b/controllers/operator/mongodbstandalone_controller.go @@ -20,6 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" mdbstatus "github.com/mongodb/mongodb-kubernetes/api/v1/status" "github.com/mongodb/mongodb-kubernetes/controllers/om" "github.com/mongodb/mongodb-kubernetes/controllers/om/deployment" @@ -39,6 +40,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/util/merge" "github.com/mongodb/mongodb-kubernetes/pkg/dns" "github.com/mongodb/mongodb-kubernetes/pkg/images" + "github.com/mongodb/mongodb-kubernetes/pkg/kube" "github.com/mongodb/mongodb-kubernetes/pkg/statefulset" "github.com/mongodb/mongodb-kubernetes/pkg/util" "github.com/mongodb/mongodb-kubernetes/pkg/util/architectures" @@ -49,9 +51,9 @@ import ( // AddStandaloneController creates a new MongoDbStandalone Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. -func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool) error { +func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool) error { // Create a new controller - reconciler := newStandaloneReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, om.NewOpsManagerConnection) + reconciler := newStandaloneReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, om.NewOpsManagerConnection) c, err := controller.New(util.MongoDbStandaloneController, mgr, controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)}) // nolint:forbidigo if err != nil { return err @@ -85,6 +87,14 @@ func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls return err } + if enableClusterMongoDBRoles { + err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &rolev1.ClusterMongoDBRole{}, + &watch.ResourcesHandler{ResourceType: watch.ClusterMongoDBRole, ResourceWatcher: reconciler.resourceWatcher})) + if err != nil { + return err + } + } + // if vault secret backend is enabled watch for Vault secret change and trigger reconcile if vault.IsVaultSecretBackend() { eventChannel := make(chan event.GenericEvent) @@ -103,12 +113,13 @@ func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls return nil } -func newStandaloneReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, omFunc om.ConnectionFactory) *ReconcileMongoDbStandalone { +func newStandaloneReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbStandalone { return 
&ReconcileMongoDbStandalone{ ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient), omConnectionFactory: omFunc, imageUrls: imageUrls, forceEnterprise: forceEnterprise, + enableClusterMongoDBRoles: enableClusterMongoDBRoles, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion: databaseNonStaticImageVersion, @@ -118,9 +129,10 @@ func newStandaloneReconciler(ctx context.Context, kubeClient client.Client, imag // ReconcileMongoDbStandalone reconciles a MongoDbStandalone object type ReconcileMongoDbStandalone struct { *ReconcileCommonController - omConnectionFactory om.ConnectionFactory - imageUrls images.ImageUrls - forceEnterprise bool + omConnectionFactory om.ConnectionFactory + imageUrls images.ImageUrls + forceEnterprise bool + enableClusterMongoDBRoles bool initDatabaseNonStaticImageVersion string databaseNonStaticImageVersion string @@ -202,7 +214,7 @@ func (r *ReconcileMongoDbStandalone) Reconcile(ctx context.Context, request reco return r.updateStatus(ctx, s, status, log) } - if status := ensureRoles(s.Spec.GetSecurity().Roles, conn, log); !status.IsOK() { + if status := r.ensureRoles(ctx, s.Spec.DbCommonSpec, r.enableClusterMongoDBRoles, conn, kube.ObjectKeyFromApiObject(s), log); !status.IsOK() { return r.updateStatus(ctx, s, status, log) } diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go index 14f4d2613..1663b24bd 100644 --- a/controllers/operator/mongodbstandalone_controller_test.go +++ b/controllers/operator/mongodbstandalone_controller_test.go @@ -151,7 +151,7 @@ func TestOnAddStandaloneWithDelay(t *testing.T) { }, }) - reconciler := newStandaloneReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) + reconciler := newStandaloneReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) checkReconcilePending(ctx, t, reconciler, st, "StatefulSet not ready", kubeClient, 3) // this affects Get interceptor func, blocking automatically marking sts as ready @@ -330,7 +330,7 @@ func TestStandaloneAgentVersionMapping(t *testing.T) { func defaultStandaloneReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, omConnectionFactoryFunc om.ConnectionFactory, rs *mdbv1.MongoDB) (*ReconcileMongoDbStandalone, kubernetesClient.Client, *om.CachedOMConnectionFactory) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFunc) kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - return newStandaloneReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory + return newStandaloneReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory } // TODO remove in favor of '/api/mongodbbuilder.go' diff --git a/controllers/operator/mongodbuser_controller.go b/controllers/operator/mongodbuser_controller.go index ac2f5cb2f..08fab6a05 100644 --- a/controllers/operator/mongodbuser_controller.go +++ b/controllers/operator/mongodbuser_controller.go @@ -174,8 +174,8 @@ func 
(r *MongoDBUserReconciler) Reconcile(ctx context.Context, request reconcile log.Warnf("Couldn't fetch MongoDB Single/Multi Cluster Resource with name: %s, namespace: %s, err: %s", user.Spec.MongoDBResourceRef.Name, user.Spec.MongoDBResourceRef.Namespace, err) - if controllerutil.ContainsFinalizer(user, util.Finalizer) { - controllerutil.RemoveFinalizer(user, util.Finalizer) + if controllerutil.ContainsFinalizer(user, util.UserFinalizer) { + controllerutil.RemoveFinalizer(user, util.UserFinalizer) if err := r.client.Update(ctx, user); err != nil { return r.updateStatus(ctx, user, workflow.Failed(xerrors.Errorf("Failed to update the user with the removed finalizer: %w", err)), log) } @@ -213,7 +213,7 @@ func (r *MongoDBUserReconciler) Reconcile(ctx context.Context, request reconcile if !user.DeletionTimestamp.IsZero() { log.Info("MongoDBUser is being deleted") - if controllerutil.ContainsFinalizer(user, util.Finalizer) { + if controllerutil.ContainsFinalizer(user, util.UserFinalizer) { return r.preDeletionCleanup(ctx, user, conn, log) } } @@ -509,8 +509,8 @@ func (r *MongoDBUserReconciler) preDeletionCleanup(ctx context.Context, user *us return r.updateStatus(ctx, user, workflow.Failed(xerrors.Errorf("Failed to perform AutomationConfig cleanup: %w", err)), log) } - if finalizerRemoved := controllerutil.RemoveFinalizer(user, util.Finalizer); !finalizerRemoved { - return r.updateStatus(ctx, user, workflow.Failed(xerrors.Errorf("Failed to remove finalizer: %w", err)), log) + if finalizerRemoved := controllerutil.RemoveFinalizer(user, util.UserFinalizer); !finalizerRemoved { + return r.updateStatus(ctx, user, workflow.Failed(xerrors.Errorf("Failed to remove finalizer")), log) } if err := r.client.Update(ctx, user); err != nil { @@ -522,7 +522,7 @@ func (r *MongoDBUserReconciler) preDeletionCleanup(ctx context.Context, user *us func (r *MongoDBUserReconciler) ensureFinalizer(ctx context.Context, user *userv1.MongoDBUser, log *zap.SugaredLogger) error { log.Info("Adding finalizer to the MongoDBUser resource") - if finalizerAdded := controllerutil.AddFinalizer(user, util.Finalizer); finalizerAdded { + if finalizerAdded := controllerutil.AddFinalizer(user, util.UserFinalizer); finalizerAdded { if err := r.client.Update(ctx, user); err != nil { return err } diff --git a/controllers/operator/mongodbuser_controller_test.go b/controllers/operator/mongodbuser_controller_test.go index 69cf04671..675e99188 100644 --- a/controllers/operator/mongodbuser_controller_test.go +++ b/controllers/operator/mongodbuser_controller_test.go @@ -12,6 +12,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" corev1 "k8s.io/api/core/v1" + apiErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb" @@ -402,7 +403,7 @@ func TestFinalizerIsAdded_WhenUserIsCreated(t *testing.T) { _ = client.Get(ctx, kube.ObjectKey(user.Namespace, user.Name), user) - assert.Contains(t, user.GetFinalizers(), util.Finalizer) + assert.Contains(t, user.GetFinalizers(), util.UserFinalizer) } func TestUserReconciler_SavesConnectionStringForMultiShardedCluster(t *testing.T) { @@ -497,7 +498,8 @@ func TestFinalizerIsRemoved_WhenUserIsDeleted(t *testing.T) { assert.Nil(t, err, "there should be no error on successful reconciliation") assert.Equal(t, newExpected, newResult, "there should be a successful reconciliation if the password is a valid reference") - assert.Empty(t, user.GetFinalizers()) + err = client.Get(ctx, kube.ObjectKey(user.Namespace, 
user.Name), user) + assert.True(t, apiErrors.IsNotFound(err), "the user should not exist") } // BuildAuthenticationEnabledReplicaSet returns a AutomationConfig after creating a Replica Set with a set of diff --git a/controllers/operator/watch/config_change_handler.go b/controllers/operator/watch/config_change_handler.go index f3cac99b0..b5c36c364 100644 --- a/controllers/operator/watch/config_change_handler.go +++ b/controllers/operator/watch/config_change_handler.go @@ -14,15 +14,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" corev1 "k8s.io/api/core/v1" + + rolev1 "github.com/mongodb/mongodb-kubernetes/api/v1/role" ) // Type is an enum for all kubernetes types watched by controller for changes for configuration type Type string const ( - ConfigMap Type = "ConfigMap" - Secret Type = "Secret" - MongoDB Type = "MongoDB" + ConfigMap Type = "ConfigMap" + Secret Type = "Secret" + MongoDB Type = "MongoDB" + ClusterMongoDBRole Type = "ClusterMongoDBRole" ) // the Object watched by controller. Includes its type and namespace+name @@ -71,19 +74,23 @@ func shouldHandleUpdate(e event.UpdateEvent) bool { } func (c *ResourcesHandler) doHandle(namespace, name string, q workqueue.RateLimitingInterface) { - configMapOrSecret := Object{ + object := Object{ ResourceType: c.ResourceType, Resource: types.NamespacedName{Name: name, Namespace: namespace}, } - for _, v := range c.ResourceWatcher.GetWatchedResources()[configMapOrSecret] { - zap.S().Infof("%s has been modified -> triggering reconciliation for dependent Resource %s", configMapOrSecret, v) + for _, v := range c.ResourceWatcher.GetWatchedResources()[object] { + zap.S().Infof("%s has been modified -> triggering reconciliation for dependent Resource %s", object, v) q.Add(reconcile.Request{NamespacedName: v}) } } // Seems we don't need to react on config map/secret removal.. -func (c *ResourcesHandler) Delete(ctx context.Context, _ event.DeleteEvent, _ workqueue.RateLimitingInterface) { +func (c *ResourcesHandler) Delete(ctx context.Context, e event.DeleteEvent, q workqueue.RateLimitingInterface) { + switch v := e.Object.(type) { + case *rolev1.ClusterMongoDBRole: + c.doHandle(v.GetNamespace(), v.GetName(), q) + } } func (c *ResourcesHandler) Generic(ctx context.Context, _ event.GenericEvent, _ workqueue.RateLimitingInterface) { diff --git a/docker/mongodb-kubernetes-tests/kubeobject/customobject.py b/docker/mongodb-kubernetes-tests/kubeobject/customobject.py index d902a0112..b3dd5d157 100644 --- a/docker/mongodb-kubernetes-tests/kubeobject/customobject.py +++ b/docker/mongodb-kubernetes-tests/kubeobject/customobject.py @@ -145,10 +145,12 @@ def _reload_if_needed(self): self.reload() @classmethod - def from_yaml(cls, yaml_file, name=None, namespace=None): + def from_yaml(cls, yaml_file, name=None, namespace=None, cluster_scoped=False): """Creates a `CustomObject` from a yaml file. In this case, `name` and `namespace` are optional in this function's signature, because they might be passed as part of the `yaml_file` document. + If creating ClusterScoped objects, `namespace` is not needed, + but the cluster_scoped flag should be set to true """ doc = yaml.safe_load(open(yaml_file)) @@ -161,22 +163,25 @@ def from_yaml(cls, yaml_file, name=None, namespace=None): "or exist in the `metadata` section of the yaml document." 
) - if (namespace is None or namespace == "") and "namespace" not in doc["metadata"]: - raise ValueError( - "`namespace` needs to be passed as part of the function call " - "or exist in the `metadata` section of the yaml document." - ) + if not cluster_scoped: + if (namespace is None or namespace == "") and "namespace" not in doc["metadata"]: + raise ValueError( + "`namespace` needs to be passed as part of the function call " + "or exist in the `metadata` section of the yaml document." + ) + + if namespace is None: + namespace = doc["metadata"]["namespace"] + else: + doc["metadata"]["namespace"] = namespace + else: + namespace = "" if name is None: name = doc["metadata"]["name"] else: doc["metadata"]["name"] = name - if namespace is None: - namespace = doc["metadata"]["namespace"] - else: - doc["metadata"]["namespace"] = namespace - kind = doc["kind"] api_version = doc["apiVersion"] if "/" in api_version: diff --git a/docker/mongodb-kubernetes-tests/kubetester/mongodb_role.py b/docker/mongodb-kubernetes-tests/kubetester/mongodb_role.py new file mode 100644 index 000000000..b534bdf94 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/kubetester/mongodb_role.py @@ -0,0 +1,27 @@ +from typing import Optional + +from kubeobject import CustomObject +from kubetester.mongodb import MongoDBCommon, Phase, in_desired_state + +ClusterMongoDBRoleKind = "ClusterMongoDBRole" + + +class ClusterMongoDBRole(CustomObject, MongoDBCommon): + def __init__(self, *args, **kwargs): + with_defaults = { + "plural": "clustermongodbroles", + "kind": "ClusterMongoDBRole", + "group": "mongodb.com", + "version": "v1", + } + with_defaults.update(kwargs) + super(ClusterMongoDBRole, self).__init__(*args, **with_defaults) + + def get_name(self) -> str: + return self["metadata"]["name"] + + def get_role_name(self): + return self["spec"]["role"] + + def get_role(self): + return self["spec"] diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/fixtures/cluster-mongodb-role.yaml b/docker/mongodb-kubernetes-tests/tests/authentication/fixtures/cluster-mongodb-role.yaml new file mode 100644 index 000000000..4edfee1e1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/authentication/fixtures/cluster-mongodb-role.yaml @@ -0,0 +1,28 @@ +apiVersion: mongodb.com/v1 +kind: ClusterMongoDBRole +metadata: + name: test-customrole +spec: + role: "test-customrole" + db: "admin" + roles: + - db: "admin" + role: "root" + privileges: + - resource: + db: "admin" + collection: "system.users" + actions: + - "find" + - "update" + - resource: + db: "admin" + collection: "system.roles" + actions: + - "find" + - "update" + authenticationRestrictions: + - clientSource: + - "127.0.0.0/8" + serverAddress: + - "10.0.0.0/8" diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py new file mode 100644 index 000000000..00f7cf963 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/authentication/mongodb_custom_roles.py @@ -0,0 +1,258 @@ +from kubetester import ( + create_or_update_configmap, + find_fixture, + random_k8s_name, + read_configmap, + try_load, + wait_until, +) +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_multi import MongoDBMulti +from kubetester.mongodb_role import ClusterMongoDBRole, ClusterMongoDBRoleKind +from pytest import fixture, mark +from tests.multicluster.conftest import cluster_spec_list + + +@fixture(scope="module") +def project_name_prefix(namespace: str) -> str: + return 
random_k8s_name(f"{namespace}-project-") + + +@fixture(scope="module") +def first_project(namespace: str, project_name_prefix: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{project_name_prefix}-first" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + +@fixture(scope="module") +def second_project(namespace: str, project_name_prefix: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{project_name_prefix}-second" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + +@fixture(scope="module") +def third_project(namespace: str, project_name_prefix: str) -> str: + cm = read_configmap(namespace=namespace, name="my-project") + project_name = f"{project_name_prefix}-third" + return create_or_update_configmap( + namespace=namespace, + name=project_name, + data={ + "baseUrl": cm["baseUrl"], + "projectName": project_name, + "orgId": cm["orgId"], + }, + ) + + +@fixture(scope="module") +def mongodb_role(): + resource = ClusterMongoDBRole.from_yaml(find_fixture("cluster-mongodb-role.yaml"), cluster_scoped=True) + + if try_load(resource): + return resource + + return resource.update() + + +@fixture(scope="module") +def replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, first_project: str) -> MongoDB: + resource = MongoDB.from_yaml(find_fixture("replica-set-scram.yaml"), namespace=namespace) + + if try_load(resource): + return resource + + resource["spec"]["members"] = 1 + resource["spec"]["security"]["roleRefs"] = [ + { + "name": mongodb_role.get_name(), + "kind": ClusterMongoDBRoleKind, + } + ] + resource["spec"]["opsManager"]["configMapRef"]["name"] = first_project + + return resource + + +@fixture(scope="module") +def sharded_cluster(namespace: str, mongodb_role: ClusterMongoDBRole, second_project: str) -> MongoDB: + resource = MongoDB.from_yaml(find_fixture("sharded-cluster-scram-sha-1.yaml"), namespace=namespace) + + if try_load(resource): + return resource + + resource["spec"]["mongodsPerShardCount"] = 1 + resource["spec"]["mongosCount"] = 1 + resource["spec"]["configServerCount"] = 1 + + resource["spec"]["security"]["roleRefs"] = [ + { + "name": mongodb_role.get_name(), + "kind": ClusterMongoDBRoleKind, + } + ] + resource["spec"]["opsManager"]["configMapRef"]["name"] = second_project + + return resource + + +@fixture(scope="module") +def mc_replica_set(namespace: str, mongodb_role: ClusterMongoDBRole, third_project: str) -> MongoDBMulti: + resource = MongoDBMulti.from_yaml(find_fixture("mongodb-multi.yaml"), namespace=namespace) + + if try_load(resource): + return resource + + resource["spec"]["security"] = { + "roleRefs": [ + { + "name": mongodb_role.get_name(), + "kind": ClusterMongoDBRoleKind, + } + ] + } + resource["spec"]["opsManager"]["configMapRef"]["name"] = third_project + resource["spec"]["clusterSpecList"] = cluster_spec_list(["kind-e2e-cluster-1"], [1]) + + return resource + + +@mark.e2e_mongodb_custom_roles +def test_create_resources( + mongodb_role: ClusterMongoDBRole, replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti +): + replica_set.update() + sharded_cluster.update() + mc_replica_set.update() + + replica_set.assert_reaches_phase(Phase.Running, timeout=400) + 
sharded_cluster.assert_reaches_phase(Phase.Running, timeout=400) + mc_replica_set.assert_reaches_phase(Phase.Running, timeout=400) + + +@mark.e2e_mongodb_custom_roles +def test_automation_config_has_roles( + replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti, mongodb_role: ClusterMongoDBRole +): + rs_tester = replica_set.get_automation_config_tester() + rs_tester.assert_has_expected_number_of_roles(expected_roles=1) + rs_tester.assert_expected_role(role_index=0, expected_value=mongodb_role.get_role()) + + sc_tester = sharded_cluster.get_automation_config_tester() + sc_tester.assert_has_expected_number_of_roles(expected_roles=1) + sc_tester.assert_expected_role(role_index=0, expected_value=mongodb_role.get_role()) + + mcrs_tester = mc_replica_set.get_automation_config_tester() + mcrs_tester.assert_has_expected_number_of_roles(expected_roles=1) + mcrs_tester.assert_expected_role(role_index=0, expected_value=mongodb_role.get_role()) + + +@mark.e2e_mongodb_custom_roles +def test_changing_role( + replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti, mongodb_role: ClusterMongoDBRole +): + rs_version = replica_set.get_automation_config_tester().automation_config["version"] + sc_version = sharded_cluster.get_automation_config_tester().automation_config["version"] + mcrs_version = mc_replica_set.get_automation_config_tester().automation_config["version"] + + mongodb_role["spec"]["roles"][0]["role"] = "readWrite" + mongodb_role.update() + + wait_until(lambda: replica_set.get_automation_config_tester().reached_version(rs_version + 1), timeout=120) + wait_until(lambda: sharded_cluster.get_automation_config_tester().reached_version(sc_version + 1), timeout=120) + wait_until(lambda: mc_replica_set.get_automation_config_tester().reached_version(mcrs_version + 1), timeout=120) + + replica_set.get_automation_config_tester().assert_expected_role( + role_index=0, expected_value=mongodb_role.get_role() + ) + sharded_cluster.get_automation_config_tester().assert_expected_role( + role_index=0, expected_value=mongodb_role.get_role() + ) + mc_replica_set.get_automation_config_tester().assert_expected_role( + role_index=0, expected_value=mongodb_role.get_role() + ) + + +@mark.e2e_mongodb_custom_roles +def test_deleting_role_does_not_remove_access( + mongodb_role: ClusterMongoDBRole, replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti +): + mongodb_role.delete() + + assert try_load(mongodb_role) == False + + replica_set.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role.get_name()}' not found" + ) + sharded_cluster.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role.get_name()}' not found" + ) + mc_replica_set.assert_reaches_phase( + phase=Phase.Failed, msg_regexp=f"ClusterMongoDBRole '{mongodb_role.get_name()}' not found" + ) + + # The role should still exist in the automation config + replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=1) + sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=1) + mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=1) + + +@mark.e2e_mongodb_custom_roles +def test_removing_role_from_resources(replica_set: MongoDB, sharded_cluster: MongoDB, mc_replica_set: MongoDBMulti): + sc_version = sharded_cluster.get_automation_config_tester().automation_config["version"] + mcrs_version = 
mc_replica_set.get_automation_config_tester().automation_config["version"] + + sharded_cluster["spec"]["security"]["roleRefs"] = None + sharded_cluster.update() + + mc_replica_set["spec"]["security"]["roleRefs"] = None + mc_replica_set.update() + + wait_until(lambda: sharded_cluster.get_automation_config_tester().reached_version(sc_version + 1), timeout=120) + wait_until(lambda: mc_replica_set.get_automation_config_tester().reached_version(mcrs_version + 1), timeout=120) + + sharded_cluster.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) + mc_replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) + + +@mark.e2e_mongodb_custom_roles +def test_install_operator_with_clustermongodbroles_disabled(multi_cluster_operator_no_cluster_mongodb_roles): + multi_cluster_operator_no_cluster_mongodb_roles.assert_is_running() + + +@mark.e2e_mongodb_custom_roles +def test_replicaset_is_failed(replica_set: MongoDB): + replica_set.assert_reaches_phase( + Phase.Failed, + msg_regexp="RoleRefs are not supported when ClusterMongoDBRoles are disabled. Please enable ClusterMongoDBRoles in the operator configuration.", + ) + + +@mark.e2e_mongodb_custom_roles +def test_replicaset_is_reconciled_without_rolerefs(replica_set: MongoDB): + rs_version = replica_set.get_automation_config_tester().automation_config["version"] + replica_set["spec"]["security"]["roleRefs"] = None + replica_set.update() + + replica_set.assert_reaches_phase(Phase.Running) + wait_until(lambda: replica_set.get_automation_config_tester().reached_version(rs_version + 1), timeout=120) + + replica_set.get_automation_config_tester().assert_has_expected_number_of_roles(expected_roles=0) diff --git a/docker/mongodb-kubernetes-tests/tests/authentication/replica_set_custom_roles.py b/docker/mongodb-kubernetes-tests/tests/authentication/replica_set_ldap_custom_roles.py similarity index 94% rename from docker/mongodb-kubernetes-tests/tests/authentication/replica_set_custom_roles.py rename to docker/mongodb-kubernetes-tests/tests/authentication/replica_set_ldap_custom_roles.py index 3c0ed9e96..8dcb34579 100644 --- a/docker/mongodb-kubernetes-tests/tests/authentication/replica_set_custom_roles.py +++ b/docker/mongodb-kubernetes-tests/tests/authentication/replica_set_ldap_custom_roles.py @@ -52,12 +52,12 @@ def ldap_user_mongodb( return user -@mark.e2e_replica_set_custom_roles +@mark.e2e_replica_set_ldap_custom_roles def test_replica_set(replica_set: MongoDB): replica_set.assert_reaches_phase(Phase.Running, timeout=400) -@mark.e2e_replica_set_custom_roles +@mark.e2e_replica_set_ldap_custom_roles def test_create_ldap_user(replica_set: MongoDB, ldap_user_mongodb: MongoDBUser): ldap_user_mongodb.assert_reaches_phase(Phase.Updated) @@ -66,7 +66,7 @@ def test_create_ldap_user(replica_set: MongoDB, ldap_user_mongodb: MongoDBUser): ac.assert_expected_users(1) -@mark.e2e_replica_set_custom_roles +@mark.e2e_replica_set_ldap_custom_roles def test_new_ldap_users_can_write_to_database(replica_set: MongoDB, ldap_user_mongodb: MongoDBUser): tester = replica_set.tester() @@ -79,7 +79,7 @@ def test_new_ldap_users_can_write_to_database(replica_set: MongoDB, ldap_user_mo ) -@mark.e2e_replica_set_custom_roles +@mark.e2e_replica_set_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_new_ldap_users_can_write_to_other_collection(replica_set: MongoDB, ldap_user_mongodb: MongoDBUser): tester = replica_set.tester() 
@@ -93,7 +93,7 @@ def test_new_ldap_users_can_write_to_other_collection(replica_set: MongoDB, ldap ) -@mark.e2e_replica_set_custom_roles +@mark.e2e_replica_set_ldap_custom_roles @mark.xfail(reason="The user should not be able to write to a database/collection it is not authorized to write on") def test_new_ldap_users_can_write_to_other_database(replica_set: MongoDB, ldap_user_mongodb: MongoDBUser): tester = replica_set.tester() @@ -106,7 +106,7 @@ def test_new_ldap_users_can_write_to_other_database(replica_set: MongoDB, ldap_u ) -@mark.e2e_replica_set_custom_roles +@mark.e2e_replica_set_ldap_custom_roles def test_automation_config_has_roles(replica_set: MongoDB): tester = replica_set.get_automation_config_tester() diff --git a/docker/mongodb-kubernetes-tests/tests/conftest.py b/docker/mongodb-kubernetes-tests/tests/conftest.py index 06721556a..43c11e53c 100644 --- a/docker/mongodb-kubernetes-tests/tests/conftest.py +++ b/docker/mongodb-kubernetes-tests/tests/conftest.py @@ -723,6 +723,37 @@ def multi_cluster_operator_manual_remediation( ) +@fixture(scope="module") +def multi_cluster_operator_no_cluster_mongodb_roles( + namespace: str, + central_cluster_name: str, + multi_cluster_operator_installation_config: dict[str, str], + central_cluster_client: client.ApiClient, + member_cluster_clients: List[MultiClusterClient], + member_cluster_names: List[str], + apply_crds_first: bool = False, +) -> Operator: + os.environ["HELM_KUBECONTEXT"] = central_cluster_name + + # when running with the local operator, this is executed by scripts/dev/prepare_local_e2e_run.sh + if not local_operator(): + run_kube_config_creation_tool(member_cluster_names, namespace, namespace, member_cluster_names) + return _install_multi_cluster_operator( + namespace, + multi_cluster_operator_installation_config, + central_cluster_client, + member_cluster_clients, + { + "operator.name": MULTI_CLUSTER_OPERATOR_NAME, + # override the serviceAccountName for the operator deployment + "operator.createOperatorServiceAccount": "false", + "operator.enableClusterMongoDBRoles": "false", + }, + central_cluster_name, + apply_crds_first=apply_crds_first, + ) + + def get_multi_cluster_operator_clustermode(namespace: str) -> Operator: os.environ["HELM_KUBECONTEXT"] = get_central_cluster_name() run_kube_config_creation_tool( diff --git a/docker/mongodb-kubernetes-tests/tests/mixed/crd_validation.py b/docker/mongodb-kubernetes-tests/tests/mixed/crd_validation.py index 9eceef7a5..b693c9169 100644 --- a/docker/mongodb-kubernetes-tests/tests/mixed/crd_validation.py +++ b/docker/mongodb-kubernetes-tests/tests/mixed/crd_validation.py @@ -37,3 +37,9 @@ def test_opsmanagers_crd_is_valid(crd_api: ApiextensionsV1Api): def test_mongodbmulti_crd_is_valid(crd_api: ApiextensionsV1Api): resource = crd_api.read_custom_resource_definition("mongodbmulticluster.mongodb.com") assert crd_has_expected_conditions(resource) + + +@mark.e2e_crd_validation +def test_cluster_mongodb_roles_crd_is_valid(crd_api: ApiextensionsV1Api): + resource = crd_api.read_custom_resource_definition("clustermongodbroles.mongodb.com") + assert crd_has_expected_conditions(resource) diff --git a/docker/mongodb-kubernetes-tests/tests/webhooks/e2e_mongodb_roles_validation_webhook.py b/docker/mongodb-kubernetes-tests/tests/webhooks/e2e_mongodb_roles_validation_webhook.py index ffb071af2..3def6afcf 100644 --- a/docker/mongodb-kubernetes-tests/tests/webhooks/e2e_mongodb_roles_validation_webhook.py +++ b/docker/mongodb-kubernetes-tests/tests/webhooks/e2e_mongodb_roles_validation_webhook.py @@ 
-2,18 +2,27 @@ from kubernetes import client from kubernetes.client.rest import ApiException from kubetester.kubetester import fixture as yaml_fixture -from kubetester.mongodb import MongoDB +from kubetester.mongodb import MongoDB, Phase +from kubetester.mongodb_role import ClusterMongoDBRole from kubetester.operator import Operator from pytest import fixture @fixture(scope="function") -def mdb(namespace: str, custom_mdb_version: str) -> str: +def mdb(namespace: str, custom_mdb_version: str) -> MongoDB: resource = MongoDB.from_yaml(yaml_fixture("role-validation-base.yaml"), namespace=namespace) resource.set_version(custom_mdb_version) return resource +@fixture(scope="function") +def mdbr() -> ClusterMongoDBRole: + resource = ClusterMongoDBRole.from_yaml( + yaml_fixture("cluster_mongodb_role_base.yaml"), namespace="", cluster_scoped=True + ) + return resource + + @pytest.mark.e2e_mongodb_roles_validation_webhook def test_wait_for_webhook(namespace: str, default_operator: Operator): default_operator.wait_for_webhook() @@ -21,190 +30,212 @@ def test_wait_for_webhook(namespace: str, default_operator: Operator): # Basic testing for invalid empty values @pytest.mark.e2e_mongodb_roles_validation_webhook -def test_empty_role_name(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "", - "db": "admin", - "privileges": [ - { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - } - ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - Cannot create a role with an empty name", - ): - mdb.create() +def test_empty_role_name(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + } + + err_msg = "Cannot create a role with an empty name" + + _assert_role_error(mdb, mdbr, role, err_msg) @pytest.mark.e2e_mongodb_roles_validation_webhook -def test_empty_db_name(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "role", - "db": "", - "privileges": [ - { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - } - ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - Cannot create a role with an empty db", - ): - mdb.create() +def test_empty_db_name(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + } + + err_msg = "Cannot create a role with an empty db" + + _assert_role_error(mdb, mdbr, role, err_msg) @pytest.mark.e2e_mongodb_roles_validation_webhook -def test_inherited_role_empty_name(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "role", - "db": "admin", - "privileges": [ - { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - "roles": [{"db": "admin", "role": ""}], - } - ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - Cannot inherit from a role with an empty name", - ): - mdb.create() +def test_inherited_role_empty_name(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + "roles": [{"db": "admin", "role": ""}], + } + + err_msg = "Cannot inherit from a role with an empty name" + + _assert_role_error(mdb, mdbr, role, err_msg) 
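These webhook tests now drive each invalid role definition through two paths: inline `spec.security.roles` entries are rejected immediately by the admission webhook, while the same definition expressed as a referenced `ClusterMongoDBRole` is accepted on create and only surfaces as a `Failed` phase during reconciliation (via the shared `_assert_role_error` helper defined further below). As a purely illustrative sketch of the kind of input these tests feed in (names are hypothetical, error text mirrors the assertions in the tests), an authentication-restriction failure would look like:

```yaml
apiVersion: mongodb.com/v1
kind: ClusterMongoDBRole
metadata:
  name: invalid-client-source-role   # hypothetical name, for illustration only
spec:
  role: role
  db: admin
  privileges:
    - actions: ["insert"]
      resource:
        db: admin
        collection: foo
  authenticationRestrictions:
    - clientSource: ["355.127.0.1"]   # not a valid IP address or CIDR range
# Expected status on a MongoDB resource referencing this role (per the test assertions):
#   Error validating role 'invalid-client-source-role' - AuthenticationRestriction is invalid -
#   clientSource 355.127.0.1 is neither a valid IP address nor a valid CIDR range
```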
@pytest.mark.e2e_mongodb_roles_validation_webhook -def test_inherited_role_empty_db(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "role", - "db": "admin", - "privileges": [ - { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - "roles": [{"db": "", "role": "role"}], - } - ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - Cannot inherit from a role with an empty db", - ): - mdb.create() +def test_inherited_role_empty_db(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + "roles": [{"db": "", "role": "role"}], + } + + err_msg = "Cannot inherit from a role with an empty db" + + _assert_role_error(mdb, mdbr, role, err_msg) # Testing for invalid authentication Restrictions @pytest.mark.e2e_mongodb_roles_validation_webhook -def test_invalid_client_source(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "role", - "db": "admin", - "privileges": [ - { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - "authenticationRestrictions": [{"clientSource": ["355.127.0.1"]}], - } - ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - AuthenticationRestriction is invalid - clientSource 355.127.0.1 is neither a valid IP address nor a valid CIDR range", - ): - mdb.create() +def test_invalid_client_source(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + "authenticationRestrictions": [{"clientSource": ["355.127.0.1"]}], + } + + err_msg = "AuthenticationRestriction is invalid - clientSource 355.127.0.1 is neither a valid IP address nor a valid CIDR range" + + _assert_role_error(mdb, mdbr, role, err_msg) @pytest.mark.e2e_mongodb_roles_validation_webhook -def test_invalid_server_address(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "role", - "db": "admin", - "privileges": [ - { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - "authenticationRestrictions": [{"serverAddress": ["355.127.0.1"]}], - } - ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - AuthenticationRestriction is invalid - serverAddress 355.127.0.1 is neither a valid IP address nor a valid CIDR range", - ): - mdb.create() +def test_invalid_server_address(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + "authenticationRestrictions": [{"serverAddress": ["355.127.0.1"]}], + } + + err_msg = "AuthenticationRestriction is invalid - serverAddress 355.127.0.1 is neither a valid IP address nor a valid CIDR range" + + _assert_role_error(mdb, mdbr, role, err_msg) # Testing for invalid privileges @pytest.mark.e2e_mongodb_roles_validation_webhook -def test_invalid_cluster_and_db_collection(mdb: str): +def test_invalid_cluster_and_db_collection(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [ + { + "actions": ["insert"], + "resource": {"collection": "foo", "db": "admin", "cluster": True}, + } + ], + } + + err_msg = "Privilege is invalid - Cluster: true is not compatible with setting db/collection" + + 
_assert_role_error(mdb, mdbr, role, err_msg) + + +@pytest.mark.e2e_mongodb_roles_validation_webhook +def test_invalid_cluster_not_true(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [{"actions": ["insert"], "resource": {"cluster": False}}], + } + + err_msg = "Privilege is invalid - The only valid value for privilege.cluster, if set, is true" + + _assert_role_error(mdb, mdbr, role, err_msg) + + +@pytest.mark.e2e_mongodb_roles_validation_webhook +def test_invalid_action(mdb: MongoDB, mdbr: ClusterMongoDBRole): + role = { + "role": "role", + "db": "admin", + "privileges": [ + { + "actions": ["insertFoo"], + "resource": {"collection": "foo", "db": "admin"}, + } + ], + } + err_msg = "Privilege is invalid - Actions are not valid - insertFoo is not a valid db action" + + _assert_role_error(mdb, mdbr, role, err_msg) + + +@pytest.mark.e2e_mongodb_roles_validation_webhook +def test_roles_and_role_refs(mdb: MongoDB): mdb["spec"]["security"]["roles"] = [ { "role": "role", "db": "admin", - "privileges": [ + "roles": [ { - "actions": ["insert"], - "resource": {"collection": "foo", "db": "admin", "cluster": True}, + "role": "root", + "db": "admin", } ], } ] - with pytest.raises( - client.rest.ApiException, - match="Error validating role - Privilege is invalid - Cluster: true is not compatible with setting db/collection", - ): - mdb.create() - - -@pytest.mark.e2e_mongodb_roles_validation_webhook -def test_invalid_cluster_not_true(mdb: str): - mdb["spec"]["security"]["roles"] = [ + mdb["spec"]["security"]["roleRefs"] = [ { - "role": "role", - "db": "admin", - "privileges": [{"actions": ["insert"], "resource": {"cluster": False}}], + "name": "test-clusterrole", + "kind": "ClusterMongoDBRole", } ] with pytest.raises( client.rest.ApiException, - match="Error validating role - Privilege is invalid - The only valid value for privilege.cluster, if set, is true", + match="At most one of roles or roleRefs can be non-empty", ): mdb.create() -@pytest.mark.e2e_mongodb_roles_validation_webhook -def test_invalid_action(mdb: str): - mdb["spec"]["security"]["roles"] = [ - { - "role": "role", - "db": "admin", - "privileges": [ - { - "actions": ["insertFoo"], - "resource": {"collection": "foo", "db": "admin"}, - } - ], - } - ] +def _assert_role_error(mdb: MongoDB, mdbr: ClusterMongoDBRole, role, err_msg): + mdb["spec"]["security"]["roles"] = [role] + with pytest.raises( client.rest.ApiException, - match="Error validating role - Privilege is invalid - Actions are not valid - insertFoo is not a valid db action", + match=f"Error validating role - {err_msg}", ): mdb.create() + + mdbr["spec"] = role + mdbr.create() + mdb["spec"]["security"]["roles"] = [] + mdb["spec"]["security"]["roleRefs"] = [ + {"name": mdbr.get_name(), "kind": mdbr.kind}, + ] + + mdb.create() + mdb.assert_reaches_phase(phase=Phase.Failed, msg_regexp=f"Error validating role '{mdbr.get_name()}' - {err_msg}") + + mdb.delete() + mdbr.delete() diff --git a/docker/mongodb-kubernetes-tests/tests/webhooks/fixtures/cluster_mongodb_role_base.yaml b/docker/mongodb-kubernetes-tests/tests/webhooks/fixtures/cluster_mongodb_role_base.yaml new file mode 100644 index 000000000..8086e354a --- /dev/null +++ b/docker/mongodb-kubernetes-tests/tests/webhooks/fixtures/cluster_mongodb_role_base.yaml @@ -0,0 +1,10 @@ +apiVersion: mongodb.com/v1 +kind: ClusterMongoDBRole +metadata: + name: test-customrole +spec: + role: "" + db: "" + roles: [] + privileges: [] + authenticationRestrictions: [] diff --git 
a/helm_chart/crds/mongodb.com_clustermongodbroles.yaml b/helm_chart/crds/mongodb.com_clustermongodbroles.yaml new file mode 100644 index 000000000..9241b7dad --- /dev/null +++ b/helm_chart/crds/mongodb.com_clustermongodbroles.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clustermongodbroles.mongodb.com +spec: + group: mongodb.com + names: + kind: ClusterMongoDBRole + listKind: ClusterMongoDBRoleList + plural: clustermongodbroles + shortNames: + - cmdbr + singular: clustermongodbrole + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The time since the MongoDB Custom Role resource was created. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterMongoDBRole is the Schema for the clustermongodbroles + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterMongoDBRoleSpec defines the desired state of ClusterMongoDBRole. + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + - resource + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - role + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: {} diff --git a/helm_chart/crds/mongodb.com_mongodb.yaml b/helm_chart/crds/mongodb.com_mongodb.yaml index bcee049d3..1e93e3116 100644 --- a/helm_chart/crds/mongodb.com_mongodb.yaml +++ b/helm_chart/crds/mongodb.com_mongodb.yaml @@ -1606,6 +1606,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -1685,6 +1699,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' service: description: |- DEPRECATED please use `spec.statefulSet.spec.serviceName` to provide a custom service name. 
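Taken together, the new `ClusterMongoDBRole` CRD above and the `roleRefs` field added to the MongoDB schema here provide the reuse described in the release notes: define the role once, reference it from any database resource. A minimal sketch (resource names are illustrative, other required MongoDB fields omitted):

```yaml
apiVersion: mongodb.com/v1
kind: ClusterMongoDBRole
metadata:
  name: my-custom-role          # illustrative name
spec:
  role: my-custom-role
  db: admin
  privileges:
    - actions: ["insert", "find"]
      resource:
        db: myapp
        collection: ""          # empty collection applies to all collections in the db
  roles:
    - role: read
      db: myapp
---
apiVersion: mongodb.com/v1
kind: MongoDB
metadata:
  name: my-replica-set          # illustrative name
spec:
  # ... type, members, version, opsManager/cloudManager, credentials omitted ...
  security:
    roleRefs:
      - name: my-custom-role
        kind: ClusterMongoDBRole
    # The CEL rule added above rejects manifests where both
    # spec.security.roles and spec.security.roleRefs are non-empty.
```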
diff --git a/helm_chart/crds/mongodb.com_mongodbmulticluster.yaml b/helm_chart/crds/mongodb.com_mongodbmulticluster.yaml index 7e8a77784..210155079 100644 --- a/helm_chart/crds/mongodb.com_mongodbmulticluster.yaml +++ b/helm_chart/crds/mongodb.com_mongodbmulticluster.yaml @@ -866,6 +866,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -945,6 +959,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' statefulSet: description: |- StatefulSetConfiguration provides the statefulset override for each of the cluster's statefulset diff --git a/helm_chart/crds/mongodb.com_opsmanagers.yaml b/helm_chart/crds/mongodb.com_opsmanagers.yaml index 8d1efe649..93e3a94f7 100644 --- a/helm_chart/crds/mongodb.com_opsmanagers.yaml +++ b/helm_chart/crds/mongodb.com_opsmanagers.yaml @@ -928,6 +928,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -1007,6 +1021,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' service: description: this is an optional service, it will get the name "-svc" in case not provided diff --git a/helm_chart/templates/operator-roles.yaml b/helm_chart/templates/operator-roles.yaml index dace37467..09b8dcefb 100644 --- a/helm_chart/templates/operator-roles.yaml +++ b/helm_chart/templates/operator-roles.yaml @@ -142,6 +142,36 @@ subjects: namespace: {{ include "mongodb-kubernetes-operator.namespace" $ }} {{- end }} +--- + +{{- if .Values.operator.enableClusterMongoDBRoles }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.operator.name }}-{{ include "mongodb-kubernetes-operator.namespace" . }}-cluster-mongodb-role +rules: + - apiGroups: + - mongodb.com + verbs: + - '*' + resources: + - clustermongodbroles +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ .Values.operator.name }}-{{ include "mongodb-kubernetes-operator.namespace" . }}-cluster-mongodb-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.operator.name }}-{{ include "mongodb-kubernetes-operator.namespace" . }}-cluster-mongodb-role +subjects: + - kind: ServiceAccount + name: {{ .Values.operator.name }} + namespace: {{ include "mongodb-kubernetes-operator.namespace" . }} + +{{- end }} + {{- end }} --- diff --git a/helm_chart/templates/operator.yaml b/helm_chart/templates/operator.yaml index 8d523fbd1..ba8e301a7 100644 --- a/helm_chart/templates/operator.yaml +++ b/helm_chart/templates/operator.yaml @@ -53,6 +53,9 @@ spec: {{- if .Values.multiCluster.clusters }} - -watch-resource=mongodbmulticluster {{- end }} + {{- if .Values.operator.enableClusterMongoDBRoles }} + - -watch-resource=clustermongodbroles + {{- end }} {{- range .Values.operator.additionalArguments }} - {{ . 
}} {{- end }} diff --git a/helm_chart/values.yaml b/helm_chart/values.yaml index ca91db672..6f67e36fe 100644 --- a/helm_chart/values.yaml +++ b/helm_chart/values.yaml @@ -66,6 +66,11 @@ operator: # kubectl mongodb plugin is used to configure multi-cluster resources createResourcesServiceAccountsAndRoles: true + # If true, the helm chart will create the ClusterRole and ClusterRoleBinding for the operator to be able to access the ClusterMongoDBRole resources. + # It will also set the --watch-resource flag, to enable the operator to watch the ClusterMongoDBRole resources for changes. + # Set to false to not create the ClusterRole and ClusterRoleBinding and to disable the operator watching the ClusterMongoDBRole resources. + enableClusterMongoDBRoles: true + # Set to false to not create the RBAC for enabling access to the PVC for resizing for the operator enablePVCResize: true diff --git a/main.go b/main.go index 618ff645e..b69a76791 100644 --- a/main.go +++ b/main.go @@ -62,6 +62,7 @@ const ( mongoDBMultiClusterCRDPlural = "mongodbmulticluster" mongoDBCommunityCRDPlural = "mongodbcommunity" mongoDBSearchCRDPlural = "mongodbsearch" + clusterMongoDBRoleCRDPlural = "clustermongodbroles" ) var ( @@ -105,7 +106,14 @@ func main() { flag.Parse() // If no CRDs are specified, we set default to non-multicluster CRDs if len(crds) == 0 { - crds = crdsToWatch{mongoDBCRDPlural, mongoDBUserCRDPlural, mongoDBOpsManagerCRDPlural, mongoDBCommunityCRDPlural, mongoDBSearchCRDPlural} + crds = crdsToWatch{ + mongoDBCRDPlural, + mongoDBUserCRDPlural, + mongoDBOpsManagerCRDPlural, + mongoDBCommunityCRDPlural, + mongoDBSearchCRDPlural, + clusterMongoDBRoleCRDPlural, + } } ctx := context.Background() @@ -123,6 +131,8 @@ func main() { // Namespace where the operator is installed currentNamespace := env.ReadOrPanic(util.CurrentNamespace) + enableClusterMongoDBRoles := slices.Contains(crds, clusterMongoDBRoleCRDPlural) + // Get a config to talk to the apiserver cfg := ctrl.GetConfigOrDie() @@ -219,7 +229,7 @@ func main() { // Setup all Controllers if slices.Contains(crds, mongoDBCRDPlural) { - if err := setupMongoDBCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, memberClusterObjectsMap); err != nil { + if err := setupMongoDBCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { log.Fatal(err) } } @@ -234,7 +244,7 @@ func main() { } } if slices.Contains(crds, mongoDBMultiClusterCRDPlural) { - if err := setupMongoDBMultiClusterCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, memberClusterObjectsMap); err != nil { + if err := setupMongoDBMultiClusterCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { log.Fatal(err) } } @@ -296,14 +306,14 @@ func main() { } } -func setupMongoDBCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { - if err := operator.AddStandaloneController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise); err != nil { +func setupMongoDBCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, 
initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { + if err := operator.AddStandaloneController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles); err != nil { return err } - if err := operator.AddReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise); err != nil { + if err := operator.AddReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles); err != nil { return err } - if err := operator.AddShardedClusterController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, memberClusterObjectsMap); err != nil { + if err := operator.AddShardedClusterController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { return err } return ctrl.NewWebhookManagedBy(mgr).For(&mdbv1.MongoDB{}).Complete() @@ -320,8 +330,8 @@ func setupMongoDBUserCRD(ctx context.Context, mgr manager.Manager, memberCluster return operator.AddMongoDBUserController(ctx, mgr, memberClusterObjectsMap) } -func setupMongoDBMultiClusterCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { - if err := operator.AddMultiReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, memberClusterObjectsMap); err != nil { +func setupMongoDBMultiClusterCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { + if err := operator.AddMultiReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { return err } return ctrl.NewWebhookManagedBy(mgr).For(&mdbmultiv1.MongoDBMultiCluster{}).Complete() diff --git a/pkg/util/constants.go b/pkg/util/constants.go index b2082f6ee..9ed9d94eb 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -30,6 +30,9 @@ const ( // MongoDbSearchController name of the MongoDBSearch controller MongoDbSearchController = "mongodbsearch-controller" + // Kinds + ClusterMongoDBRoleKind = "ClusterMongoDBRole" + // Ops manager config map and secret variables OmBaseUrl = "baseUrl" OmOrgId = "orgId" @@ -313,7 +316,7 @@ const ( MdbAppdbAssumeOldFormat = "MDB_APPDB_ASSUME_OLD_FORMAT" - Finalizer = "mongodb.com/v1.userRemovalFinalizer" + UserFinalizer = "mongodb.com/v1.userRemovalFinalizer" ) type OperatorEnvironment string diff --git a/public/crds.yaml b/public/crds.yaml index e8bb32fdf..31015b6d5 100644 --- a/public/crds.yaml +++ b/public/crds.yaml @@ -1,6 +1,114 @@ --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.15.0 + name: clustermongodbroles.mongodb.com +spec: + group: mongodb.com + names: + 
kind: ClusterMongoDBRole + listKind: ClusterMongoDBRoleList + plural: clustermongodbroles + shortNames: + - cmdbr + singular: clustermongodbrole + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The time since the MongoDB Custom Role resource was created. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: ClusterMongoDBRole is the Schema for the clustermongodbroles + API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ClusterMongoDBRoleSpec defines the desired state of ClusterMongoDBRole. + properties: + authenticationRestrictions: + items: + properties: + clientSource: + items: + type: string + type: array + serverAddress: + items: + type: string + type: array + type: object + type: array + db: + type: string + privileges: + items: + properties: + actions: + items: + type: string + type: array + resource: + properties: + cluster: + type: boolean + collection: + type: string + db: + type: string + type: object + required: + - actions + - resource + type: object + type: array + role: + type: string + roles: + items: + properties: + db: + type: string + role: + type: string + required: + - db + - role + type: object + type: array + required: + - db + - role + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.15.0 @@ -1606,6 +1714,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -1685,6 +1807,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' service: description: |- DEPRECATED please use `spec.statefulSet.spec.serviceName` to provide a custom service name. 
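The same `roleRefs` schema and CEL guard are mirrored into the MongoDBMultiCluster and Ops Manager (AppDB) sections of the bundled CRDs in the hunks that follow, so referencing a shared role from a multi-cluster deployment is analogous (sketch, illustrative name, other required fields omitted):

```yaml
apiVersion: mongodb.com/v1
kind: MongoDBMultiCluster
metadata:
  name: my-multi-replica-set    # illustrative name
spec:
  # ... clusterSpecList, version, credentials omitted ...
  security:
    roleRefs:
      - name: my-custom-role
        kind: ClusterMongoDBRole
```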
@@ -4241,6 +4367,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -4320,6 +4460,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' statefulSet: description: |- StatefulSetConfiguration provides the statefulset override for each of the cluster's statefulset @@ -5887,6 +6031,20 @@ spec: type: object certsSecretPrefix: type: string + roleRefs: + items: + properties: + kind: + enum: + - ClusterMongoDBRole + type: string + name: + type: string + required: + - kind + - name + type: object + type: array roles: items: properties: @@ -5966,6 +6124,10 @@ spec: type: boolean type: object type: object + x-kubernetes-validations: + - message: At most one of roles or roleRefs can be non-empty + rule: '!(has(self.roles) && has(self.roleRefs)) || !(self.roles.size() + > 0 && self.roleRefs.size() > 0)' service: description: this is an optional service, it will get the name "-svc" in case not provided diff --git a/public/mongodb-kubernetes-multi-cluster.yaml b/public/mongodb-kubernetes-multi-cluster.yaml index 3fd6f93c6..4873bd563 100644 --- a/public/mongodb-kubernetes-multi-cluster.yaml +++ b/public/mongodb-kubernetes-multi-cluster.yaml @@ -2,6 +2,19 @@ # Source: mongodb-kubernetes/templates/operator-roles.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-multi-cluster-mongodb-cluster-mongodb-role +rules: + - apiGroups: + - mongodb.com + verbs: + - '*' + resources: + - clustermongodbroles +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: mongodb-kubernetes-operator-mongodb-webhook rules: @@ -57,6 +70,20 @@ rules: # Source: mongodb-kubernetes/templates/operator-roles.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-multi-cluster-mongodb-cluster-mongodb-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mongodb-kubernetes-operator-multi-cluster-mongodb-cluster-mongodb-role +subjects: + - kind: ServiceAccount + name: mongodb-kubernetes-operator-multi-cluster + namespace: mongodb +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: mongodb-kubernetes-operator-multi-cluster-mongodb-webhook-binding roleRef: @@ -291,6 +318,7 @@ spec: - -watch-resource=mongodbcommunity - -watch-resource=mongodbsearch - -watch-resource=mongodbmulticluster + - -watch-resource=clustermongodbroles command: - /usr/local/bin/mongodb-kubernetes-operator volumeMounts: diff --git a/public/mongodb-kubernetes-openshift.yaml b/public/mongodb-kubernetes-openshift.yaml index 7a8362263..a8fba44e2 100644 --- a/public/mongodb-kubernetes-openshift.yaml +++ b/public/mongodb-kubernetes-openshift.yaml @@ -2,6 +2,19 @@ # Source: mongodb-kubernetes/templates/operator-roles.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role +rules: + - apiGroups: + - mongodb.com + verbs: + - '*' + resources: + - clustermongodbroles 
+--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: mongodb-kubernetes-operator-mongodb-webhook rules: @@ -57,6 +70,20 @@ rules: # Source: mongodb-kubernetes/templates/operator-roles.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role +subjects: + - kind: ServiceAccount + name: mongodb-kubernetes-operator + namespace: mongodb +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: mongodb-kubernetes-operator-mongodb-webhook-binding roleRef: @@ -287,6 +314,7 @@ spec: - -watch-resource=mongodbusers - -watch-resource=mongodbcommunity - -watch-resource=mongodbsearch + - -watch-resource=clustermongodbroles command: - /usr/local/bin/mongodb-kubernetes-operator resources: diff --git a/public/mongodb-kubernetes.yaml b/public/mongodb-kubernetes.yaml index 40e5a29f4..55e386cd7 100644 --- a/public/mongodb-kubernetes.yaml +++ b/public/mongodb-kubernetes.yaml @@ -2,6 +2,19 @@ # Source: mongodb-kubernetes/templates/operator-roles.yaml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role +rules: + - apiGroups: + - mongodb.com + verbs: + - '*' + resources: + - clustermongodbroles +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: mongodb-kubernetes-operator-mongodb-webhook rules: @@ -57,6 +70,20 @@ rules: # Source: mongodb-kubernetes/templates/operator-roles.yaml kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mongodb-kubernetes-operator-mongodb-cluster-mongodb-role +subjects: + - kind: ServiceAccount + name: mongodb-kubernetes-operator + namespace: mongodb +--- +# Source: mongodb-kubernetes/templates/operator-roles.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: name: mongodb-kubernetes-operator-mongodb-webhook-binding roleRef: @@ -290,6 +317,7 @@ spec: - -watch-resource=mongodbusers - -watch-resource=mongodbcommunity - -watch-resource=mongodbsearch + - -watch-resource=clustermongodbroles command: - /usr/local/bin/mongodb-kubernetes-operator resources: diff --git a/public/samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml b/public/samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml index 0c8d0e4e4..b82820ea8 100644 --- a/public/samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml +++ b/public/samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml @@ -1,6 +1,22 @@ # Central Cluster, cluster-scoped resources apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole +metadata: + creationTimestamp: null + labels: + multi-cluster: "true" + name: mongodb-kubernetes-operator-central-namespace-multi-cluster-mongodb-role +rules: +- apiGroups: + - mongodb.com + resources: + - clustermongodbroles + verbs: + - '*' + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole metadata: 
creationTimestamp: null labels: @@ -122,6 +138,23 @@ rules: # Central Cluster, cluster-scoped resources apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + multi-cluster: "true" + name: mongodb-kubernetes-operator-central-namespace-multi-cluster-mongodb-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: mongodb-kubernetes-operator-central-namespace-multi-cluster-mongodb-role +subjects: +- kind: ServiceAccount + name: mongodb-kubernetes-operator-multicluster + namespace: central-namespace + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: creationTimestamp: null labels: diff --git a/public/tools/multicluster/cmd/setup.go b/public/tools/multicluster/cmd/setup.go index 207c60669..881b2f5c8 100644 --- a/public/tools/multicluster/cmd/setup.go +++ b/public/tools/multicluster/cmd/setup.go @@ -25,6 +25,7 @@ func init() { setupCmd.Flags().BoolVar(&setupFlags.Cleanup, "cleanup", false, "Delete all previously created resources except for namespaces. [optional default: false]") setupCmd.Flags().BoolVar(&setupFlags.ClusterScoped, "cluster-scoped", false, "Create ClusterRole and ClusterRoleBindings for member clusters. [optional default: false]") setupCmd.Flags().BoolVar(&setupFlags.CreateTelemetryClusterRoles, "create-telemetry-roles", true, "Create ClusterRole and ClusterRoleBindings for member clusters for telemetry. [optional default: true]") + setupCmd.Flags().BoolVar(&setupFlags.CreateMongoDBRolesClusterRole, "create-mongodb-roles-cluster-role", true, "Create ClusterRole and ClusterRoleBinding for central cluster for ClusterMongoDBRole resources. [optional default: true]") setupCmd.Flags().BoolVar(&setupFlags.InstallDatabaseRoles, "install-database-roles", false, "Install the ServiceAccounts and Roles required for running database workloads in the member clusters. [optional default: false]") setupCmd.Flags().BoolVar(&setupFlags.CreateServiceAccountSecrets, "create-service-account-secrets", true, "Create service account token secrets. [optional default: true]") setupCmd.Flags().StringVar(&setupFlags.ImagePullSecrets, "image-pull-secrets", "", "Name of the secret for imagePullSecrets to set in created service accounts") diff --git a/public/tools/multicluster/pkg/common/common.go b/public/tools/multicluster/pkg/common/common.go index 450ac978e..bba97054a 100644 --- a/public/tools/multicluster/pkg/common/common.go +++ b/public/tools/multicluster/pkg/common/common.go @@ -42,20 +42,21 @@ const ( // Flags holds all the fields provided by the user. 
type Flags struct { - MemberClusters []string - MemberClusterApiServerUrls []string - ServiceAccount string - CentralCluster string - MemberClusterNamespace string - CentralClusterNamespace string - Cleanup bool - ClusterScoped bool - InstallDatabaseRoles bool - CreateTelemetryClusterRoles bool - OperatorName string - SourceCluster string - CreateServiceAccountSecrets bool - ImagePullSecrets string + MemberClusters []string + MemberClusterApiServerUrls []string + ServiceAccount string + CentralCluster string + MemberClusterNamespace string + CentralClusterNamespace string + Cleanup bool + ClusterScoped bool + InstallDatabaseRoles bool + CreateTelemetryClusterRoles bool + CreateMongoDBRolesClusterRole bool + OperatorName string + SourceCluster string + CreateServiceAccountSecrets bool + ImagePullSecrets string } const ( @@ -319,9 +320,6 @@ func EnsureMultiClusterResources(ctx context.Context, flags Flags, clientMap map } centralClusterClient := clientMap[flags.CentralCluster] - if err != nil { - return xerrors.Errorf("failed to get central cluster clientset: %w", err) - } if err := createKubeConfigSecret(ctx, centralClusterClient, kubeConfigBytes, flags); err != nil { return xerrors.Errorf("failed creating KubeConfig secret: %w", err) @@ -524,6 +522,22 @@ func buildClusterRoleTelemetry() rbacv1.ClusterRole { } } +func buildClusterRoleMongoDBRole(namespace string) rbacv1.ClusterRole { + return rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-multi-cluster-mongodb-role", DefaultOperatorName, namespace), + Labels: multiClusterLabels(), + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"*"}, + Resources: []string{"clustermongodbroles"}, + APIGroups: []string{"mongodb.com"}, + }, + }, + } +} + // buildRoleBinding creates the RoleBinding which binds the Role to the given ServiceAccount. func buildRoleBinding(role rbacv1.Role, serviceAccount string, serviceAccountNamespace string) rbacv1.RoleBinding { return rbacv1.RoleBinding{ @@ -570,7 +584,7 @@ func buildClusterRoleBinding(clusterRole rbacv1.ClusterRole, serviceAccountName, } // createRoles creates the ServiceAccount and Roles, RoleBindings, ClusterRoles and ClusterRoleBindings required. 
-func createRoles(ctx context.Context, c KubeClient, serviceAccountName, serviceAccountNamespace, namespace string, clusterScoped, telemetryClusterRoles bool, clusterType clusterType) error { +func createRoles(ctx context.Context, c KubeClient, serviceAccountName, serviceAccountNamespace, namespace string, clusterScoped, telemetryClusterRoles bool, mongodbRolesClusterRole bool, clusterType clusterType) error { var err error if telemetryClusterRoles { @@ -592,6 +606,27 @@ func createRoles(ctx context.Context, c KubeClient, serviceAccountName, serviceA } + if clusterType == clusterTypeCentral && mongodbRolesClusterRole { + // Create ClusterRole to access the cluster-scoped resource ClusterMongoDBRole + clusterRoleForMongoDBRole := buildClusterRoleMongoDBRole(serviceAccountNamespace) + _, err = c.RbacV1().ClusterRoles().Create(ctx, &clusterRoleForMongoDBRole, metav1.CreateOptions{}) + if err != nil { + if errors.IsAlreadyExists(err) { + if _, err := c.RbacV1().ClusterRoles().Update(ctx, &clusterRoleForMongoDBRole, metav1.UpdateOptions{}); err != nil { + return xerrors.Errorf("error updating role: %w", err) + } + } else { + return xerrors.Errorf("error creating cluster role: %w", err) + } + } + + if err := createClusterRoleBinding(ctx, c, serviceAccountName, serviceAccountNamespace, + fmt.Sprintf("%s-%s-multi-cluster-mongodb-role-binding", DefaultOperatorName, serviceAccountNamespace), + clusterRoleForMongoDBRole); err != nil { + return err + } + } + if !clusterScoped { var role rbacv1.Role if clusterType == clusterTypeCentral { @@ -675,14 +710,14 @@ func createOperatorServiceAccountsAndRoles(ctx context.Context, clientMap map[st } } - if err := createRoles(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.CentralClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, clusterTypeCentral); err != nil { + if err := createRoles(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.CentralClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, f.CreateMongoDBRolesClusterRole, clusterTypeCentral); err != nil { return err } // in case the operator namespace (CentralClusterNamespace) is different from member cluster namespace we need // to provide roles and role binding to the operator's SA in member namespace if f.CentralClusterNamespace != f.MemberClusterNamespace { - if err := createRoles(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.MemberClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, clusterTypeCentral); err != nil { + if err := createRoles(ctx, centralClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.MemberClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, f.CreateMongoDBRolesClusterRole, clusterTypeCentral); err != nil { return err } } @@ -705,10 +740,10 @@ func createOperatorServiceAccountsAndRoles(ctx context.Context, clientMap map[st } } - if err := createRoles(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.MemberClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, clusterTypeMember); err != nil { + if err := createRoles(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.MemberClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, f.CreateMongoDBRolesClusterRole, clusterTypeMember); err != nil { return err } - if err := createRoles(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.CentralClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, 
clusterTypeMember); err != nil { + if err := createRoles(ctx, memberClusterClient, f.ServiceAccount, f.CentralClusterNamespace, f.CentralClusterNamespace, f.ClusterScoped, f.CreateTelemetryClusterRoles, f.CreateMongoDBRolesClusterRole, clusterTypeMember); err != nil { return err } } diff --git a/public/tools/multicluster/pkg/common/common_test.go b/public/tools/multicluster/pkg/common/common_test.go index 1dd769437..ab45b6c47 100644 --- a/public/tools/multicluster/pkg/common/common_test.go +++ b/public/tools/multicluster/pkg/common/common_test.go @@ -80,17 +80,18 @@ func testFlags(t *testing.T, cleanup bool) Flags { assert.NoError(t, err) return Flags{ - MemberClusterApiServerUrls: memberClusterApiServerUrls, - MemberClusters: memberClusters, - ServiceAccount: "mongodb-kubernetes-operator-multicluster", - CentralCluster: "central-cluster", - MemberClusterNamespace: "member-namespace", - CentralClusterNamespace: "central-namespace", - Cleanup: cleanup, - ClusterScoped: false, - CreateTelemetryClusterRoles: true, - OperatorName: DefaultOperatorName, - CreateServiceAccountSecrets: true, + MemberClusterApiServerUrls: memberClusterApiServerUrls, + MemberClusters: memberClusters, + ServiceAccount: "mongodb-kubernetes-operator-multicluster", + CentralCluster: "central-cluster", + MemberClusterNamespace: "member-namespace", + CentralClusterNamespace: "central-namespace", + Cleanup: cleanup, + ClusterScoped: false, + CreateTelemetryClusterRoles: true, + CreateMongoDBRolesClusterRole: true, + OperatorName: DefaultOperatorName, + CreateServiceAccountSecrets: true, } } diff --git a/scripts/dev/reset.go b/scripts/dev/reset.go index dee4a4099..86c5f8e19 100644 --- a/scripts/dev/reset.go +++ b/scripts/dev/reset.go @@ -9,6 +9,7 @@ import ( "time" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" @@ -288,6 +289,21 @@ func resetNamespace(ctx context.Context, contextName string, namespace string, d err = kubeClient.CoreV1().PersistentVolumeClaims(namespace).DeleteCollection(ctx, deleteOptionsNoGrace, v1.ListOptions{}) collectError(err, "failed to delete PVCs") + // Remove finalizers on MongoDBUsers + userGVR := schema.GroupVersionResource{ + Group: "mongodb.com", + Version: "v1", + Resource: "mongodbusers", + } + list, err := dynamicClient.Resource(userGVR).List(ctx, v1.ListOptions{}) + collectError(err, "failed to list mongodb users") + if err == nil { + for _, customRole := range list.Items { + _, err := dynamicClient.Resource(userGVR).Namespace(namespace).Patch(ctx, customRole.GetName(), types.MergePatchType, []byte(`{"metadata":{"finalizers":null}}`), v1.PatchOptions{}) + collectError(err, fmt.Sprintf("failed to patch custom role %s", customRole.GetName())) + } + } + // Delete CRDs if specified if deleteCRD { crdNames := []string{ @@ -298,6 +314,7 @@ func resetNamespace(ctx context.Context, contextName string, namespace string, d "mongodbusers.mongodb.com", "opsmanagers.mongodb.com", "mongodbsearch.mongodb.com", + "clustermongodbroles.mongodb.com", } deleteCRDs(ctx, dynamicClient, crdNames, collectError) }
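Operationally, the behaviour wired in across the Helm chart, the operator's `-watch-resource` arguments, and the multi-cluster CLI in this change can be switched off. A hedged sketch of the Helm opt-out (the non-default value described in the release notes):

```yaml
# Helm values override — skips the ClusterRole/ClusterRoleBinding for
# clustermongodbroles and omits the -watch-resource=clustermongodbroles operator argument.
operator:
  enableClusterMongoDBRoles: false
```

For the multi-cluster CLI path, the equivalent opt-out is the setup command's new `--create-mongodb-roles-cluster-role=false` flag, which causes `createRoles` above to skip the central-cluster ClusterRole and ClusterRoleBinding for `clustermongodbroles`.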