Merge pull request #3062 from asincu/cherry_pick_policy_rec
[cherry-pick][v1.33]PolicyRecommendation needs to access resource within a tenant namespa…
asincu authored Dec 14, 2023
2 parents dcb6175 + 8f3cb3d commit 62016af
Showing 7 changed files with 290 additions and 56 deletions.
@@ -332,16 +332,26 @@ func (r *ReconcilePolicyRecommendation) Reconcile(ctx context.Context, request r
// Create a component handler to manage the rendered component.
handler := utils.NewComponentHandler(log, r.client, r.scheme, policyRecommendation)

// Determine the namespaces to which we must bind the cluster role.
// For multi-tenant, the cluster role will be bound to the service account in the tenant namespace.
// For single-tenant or zero-tenant, the cluster role will be bound to the service account in the
// tigera-policy-recommendation namespace.
bindNamespaces, err := helper.TenantNamespaces(r.client)
if err != nil {
return reconcile.Result{}, err
}

logc.V(3).Info("rendering components")
policyRecommendationCfg := &render.PolicyRecommendationConfiguration{
ClusterDomain: r.clusterDomain,
Installation: installation,
ManagedCluster: isManagedCluster,
PullSecrets: pullSecrets,
Openshift: r.provider == operatorv1.ProviderOpenShift,
UsePSP: r.usePSP,
Namespace: helper.InstallNamespace(),
Tenant: tenant,
ClusterDomain: r.clusterDomain,
Installation: installation,
ManagedCluster: isManagedCluster,
PullSecrets: pullSecrets,
Openshift: r.provider == operatorv1.ProviderOpenShift,
UsePSP: r.usePSP,
Namespace: helper.InstallNamespace(),
Tenant: tenant,
BindingNamespaces: bindNamespaces,
}

// Render the desired objects from the CRD and create or update them.
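
The new BindingNamespaces field feeds the shared cluster-role-binding helper used later in pkg/render/policyrecommendation.go (rcomponents.ClusterRoleBinding). The sketch below is an assumption about that helper's shape, not code from this commit: it binds one ClusterRole to the same service account name in each binding namespace, which is how single-tenant (one fixed namespace) and multi-tenant (one namespace per tenant) setups can share the same rendering path.

package components // hypothetical package; the real helper lives under pkg/render/common/components

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ClusterRoleBindingSketch is an assumed shape for rcomponents.ClusterRoleBinding:
// bind the named ClusterRole to the same ServiceAccount in every binding namespace.
func ClusterRoleBindingSketch(name, clusterRole, serviceAccount string, namespaces []string) *rbacv1.ClusterRoleBinding {
	subjects := []rbacv1.Subject{}
	for _, ns := range namespaces {
		subjects = append(subjects, rbacv1.Subject{
			Kind:      "ServiceAccount",
			Name:      serviceAccount,
			Namespace: ns,
		})
	}
	return &rbacv1.ClusterRoleBinding{
		TypeMeta:   metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"},
		ObjectMeta: metav1.ObjectMeta{Name: name},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     clusterRole,
		},
		Subjects: subjects,
	}
}
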
4 changes: 4 additions & 0 deletions pkg/render/common/networkpolicy/networkpolicy.go
@@ -254,6 +254,10 @@ func (h *NetworkPolicyHelper) ManagerSourceEntityRule() v3.EntityRule {
return CreateSourceEntityRule(h.namespace("tigera-manager"), "tigera-manager")
}

func (h *NetworkPolicyHelper) PolicyRecommendationSourceEntityRule() v3.EntityRule {
return CreateSourceEntityRule(h.namespace("tigera-policy-recommendation"), "tigera-policy-recommendation")
}

const PrometheusSelector = "(app == 'prometheus' && prometheus == 'calico-node-prometheus') || (app.kubernetes.io/name == 'prometheus' && prometheus == 'calico-node-prometheus')"

var PrometheusEntityRule = v3.EntityRule{
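
The new PolicyRecommendationSourceEntityRule delegates to CreateSourceEntityRule through the tenancy-aware h.namespace(...), so the same call produces a rule scoped to the fixed tigera-policy-recommendation namespace in single-tenant mode or to a tenant namespace in multi-tenant mode. The sketch below shows an assumed shape for CreateSourceEntityRule (the real selector strings in pkg/render/common/networkpolicy may differ); it assumes fmt and the projectcalico.org v3 types are imported.

// Sketch only: an assumed shape for CreateSourceEntityRule. The exact label
// keys and selector syntax used by the real helper may differ; the point is
// that the namespace argument is the only tenancy-dependent input.
func createSourceEntityRuleSketch(namespace, deploymentName string) v3.EntityRule {
	return v3.EntityRule{
		NamespaceSelector: fmt.Sprintf("projectcalico.org/name == '%s'", namespace),
		Selector:          fmt.Sprintf("k8s-app == '%s'", deploymentName),
	}
}
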
22 changes: 6 additions & 16 deletions pkg/render/fluentd.go
@@ -656,16 +656,11 @@ func (c *fluentdComponent) metricsService() *corev1.Service {
}

func (c *fluentdComponent) envvars() []corev1.EnvVar {
// Determine the namespace in which Linseed is running. For managed and standalone clusters, this is always the elasticsearch
// namespace. For multi-tenant management clusters, this may vary.
linseedNS := ElasticsearchNamespace
if c.cfg.Tenant.MultiTenant() {
linseedNS = c.cfg.Tenant.Namespace
}

envs := []corev1.EnvVar{
{Name: "LINSEED_ENABLED", Value: "true"},
{Name: "LINSEED_ENDPOINT", Value: relasticsearch.LinseedEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain, linseedNS)},
// Determine the namespace in which Linseed is running. For managed and standalone clusters, this is always the elasticsearch
// namespace. For multi-tenant management clusters, this may vary.
{Name: "LINSEED_ENDPOINT", Value: relasticsearch.LinseedEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain, LinseedNamespace(c.cfg.Tenant))},
{Name: "LINSEED_CA_PATH", Value: c.trustedBundlePath()},
{Name: "TLS_KEY_PATH", Value: c.keyPath()},
{Name: "TLS_CRT_PATH", Value: c.certPath()},
@@ -1061,13 +1056,6 @@ func (c *fluentdComponent) eksLogForwarderDeployment() *appsv1.Deployment {
eksCloudwatchLogCredentialHashAnnotation: rmeta.AnnotationHash(c.cfg.EKSConfig),
}

// Determine the namespace in which Linseed is running. For managed and standalone clusters, this is always the elasticsearch
// namespace. For multi-tenant management clusters, this may vary.
linseedNS := ElasticsearchNamespace
if c.cfg.Tenant.MultiTenant() {
linseedNS = c.cfg.Tenant.Namespace
}

envVars := []corev1.EnvVar{
// Meta flags.
{Name: "LOG_LEVEL", Value: "info"},
@@ -1084,7 +1072,9 @@ func (c *fluentdComponent) eksLogForwarderDeployment() *appsv1.Deployment {
{Name: "AWS_ACCESS_KEY_ID", ValueFrom: secret.GetEnvVarSource(EksLogForwarderSecret, EksLogForwarderAwsId, false)},
{Name: "AWS_SECRET_ACCESS_KEY", ValueFrom: secret.GetEnvVarSource(EksLogForwarderSecret, EksLogForwarderAwsKey, false)},
{Name: "LINSEED_ENABLED", Value: "true"},
{Name: "LINSEED_ENDPOINT", Value: relasticsearch.LinseedEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain, linseedNS)},
// Determine the namespace in which Linseed is running. For managed and standalone clusters, this is always the elasticsearch
// namespace. For multi-tenant management clusters, this may vary.
{Name: "LINSEED_ENDPOINT", Value: relasticsearch.LinseedEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain, LinseedNamespace(c.cfg.Tenant))},
{Name: "LINSEED_CA_PATH", Value: c.trustedBundlePath()},
{Name: "TLS_CRT_PATH", Value: c.cfg.EKSLogForwarderKeyPair.VolumeMountCertificateFilePath()},
{Name: "TLS_KEY_PATH", Value: c.cfg.EKSLogForwarderKeyPair.VolumeMountKeyFilePath()},
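
Both fluentd call sites now delegate the namespace choice to LinseedNamespace(c.cfg.Tenant), defined in pkg/render/logstorage/linseed/linseed.go below. For orientation, relasticsearch.LinseedEndpoint is assumed to build a service DNS name from that namespace, roughly as sketched here; the service name, port handling, and cluster-domain handling belong to the real helper and may differ.

// Sketch only: an assumed shape for relasticsearch.LinseedEndpoint, with the
// osType parameter reduced to a boolean. "tigera-linseed" as the service name
// and the Windows/Linux split are assumptions; the namespace argument is what
// LinseedNamespace(tenant) now controls.
func linseedEndpointSketch(windows bool, clusterDomain, namespace string) string {
	if windows {
		// Windows nodes are assumed to need the fully qualified service name.
		return fmt.Sprintf("https://tigera-linseed.%s.svc.%s", namespace, clusterDomain)
	}
	return fmt.Sprintf("https://tigera-linseed.%s.svc", namespace)
}
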
16 changes: 13 additions & 3 deletions pkg/render/logstorage/linseed/linseed.go
@@ -396,8 +396,8 @@ func (l *linseed) linseedDeployment() *appsv1.Deployment {
envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_EXPECTED_TENANT_ID", Value: l.cfg.Tenant.Spec.ID})

if l.cfg.Tenant.MultiTenant() {
// For clusters shared between muliple tenants, we need to configure Linseed with the correct namespace information for its tenant.
envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT", Value: fmt.Sprintf("https://tigera-manager.%s.svc:9443", l.cfg.Tenant.Namespace)})
// For clusters shared between multiple tenants, we need to configure Linseed with the correct namespace information for its tenant.
envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT", Value: render.ManagerService(l.cfg.Tenant)})
envVars = append(envVars, corev1.EnvVar{Name: "LINSEED_TENANT_NAMESPACE", Value: l.cfg.Tenant.Namespace})

// We also use shared indices for multi-tenant clusters.
@@ -652,7 +652,7 @@ func (l *linseed) linseedAllowTigeraPolicy() *v3.NetworkPolicy {
{
Action: v3.Allow,
Protocol: &networkpolicy.TCPProtocol,
Source: render.PolicyRecommendationEntityRule,
Source: networkpolicy.Helper(l.cfg.Tenant.MultiTenant(), l.cfg.Namespace).PolicyRecommendationSourceEntityRule(),
Destination: linseedIngressDestinationEntityRule,
},
}
@@ -684,3 +684,13 @@
},
}
}

// LinseedNamespace determines the namespace in which Linseed is running.
// For management and standalone clusters, this is always the tigera-elasticsearch
// namespace. For multi-tenant management clusters, this is the tenant namespace.
func LinseedNamespace(tenant *operatorv1.Tenant) string {
if tenant.MultiTenant() {
return tenant.Namespace
}
return "tigera-elasticsearch"
}
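
LINSEED_MULTI_CLUSTER_FORWARDING_ENDPOINT is now derived from render.ManagerService(l.cfg.Tenant) instead of a hard-coded format string. The removed line pinned the endpoint to https://tigera-manager.<tenant namespace>.svc:9443, so ManagerService presumably resolves the manager namespace the same way LinseedNamespace resolves the Linseed namespace; the sketch below is an assumption, not the helper's actual code.

// Sketch only: an assumed shape for render.ManagerService, mirroring
// LinseedNamespace above. Port 9443 matches the removed hard-coded endpoint;
// "tigera-manager" as the single-tenant namespace is an assumption.
func managerServiceSketch(tenant *operatorv1.Tenant) string {
	ns := "tigera-manager"
	if tenant.MultiTenant() {
		ns = tenant.Namespace
	}
	return fmt.Sprintf("https://tigera-manager.%s.svc:9443", ns)
}
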
123 changes: 94 additions & 29 deletions pkg/render/policyrecommendation.go
@@ -16,6 +16,9 @@ package render

import (
"crypto/x509"
"fmt"

"k8s.io/apiserver/pkg/authentication/serviceaccount"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
@@ -28,6 +31,7 @@ import (
operatorv1 "github.com/tigera/operator/api/v1"
"github.com/tigera/operator/pkg/components"
"github.com/tigera/operator/pkg/ptr"
rcomponents "github.com/tigera/operator/pkg/render/common/components"
relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch"
rmeta "github.com/tigera/operator/pkg/render/common/meta"
"github.com/tigera/operator/pkg/render/common/networkpolicy"
@@ -47,11 +51,10 @@ const (
PolicyRecommendationPodSecurityPolicyName = PolicyRecommendationName
PolicyRecommendationPolicyName = networkpolicy.TigeraComponentPolicyPrefix + PolicyRecommendationName

PolicyRecommendationTLSSecretName = "policy-recommendation-tls"
PolicyRecommendationTLSSecretName = "policy-recommendation-tls"
PolicyRecommendationMultiTenantManagedClustersAccessClusterRoleName = "tigera-policy-recommendation-managed-cluster-access"
)

var PolicyRecommendationEntityRule = networkpolicy.CreateSourceEntityRule(PolicyRecommendationNamespace, PolicyRecommendationName)

// Register secret/certs that need Server and Client Key usage
func init() {
certkeyusage.SetCertKeyUsage(PolicyRecommendationTLSSecretName, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth})
@@ -68,8 +71,9 @@ type PolicyRecommendationConfiguration struct {
PolicyRecommendationCertSecret certificatemanagement.KeyPairInterface

// Whether the cluster supports pod security policies.
UsePSP bool
Namespace string
UsePSP bool
Namespace string
BindingNamespaces []string

// Whether or not to run the rendered components in multi-tenant mode.
Tenant *operatorv1.Tenant
Expand Down Expand Up @@ -113,6 +117,9 @@ func (pr *policyRecommendationComponent) Objects() ([]client.Object, []client.Ob
pr.clusterRoleBinding(),
networkpolicy.AllowTigeraDefaultDeny(pr.cfg.Namespace),
}
if pr.cfg.Tenant.MultiTenant() {
objs = append(objs, pr.multiTenantManagedClustersAccess()...)
}

if pr.cfg.ManagedCluster {
// No further resources are needed for managed clusters
@@ -145,16 +152,6 @@ func (pr *policyRecommendationComponent) clusterRole() client.Object {
Resources: []string{"namespaces"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{"licensekeys", "managedclusters"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"crd.projectcalico.org"},
Resources: []string{"licensekeys"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{
@@ -179,6 +176,21 @@
},
}

if !pr.cfg.ManagedCluster {
rules = append(rules, []rbacv1.PolicyRule{
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{"licensekeys", "managedclusters"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{"crd.projectcalico.org"},
Resources: []string{"licensekeys"},
Verbs: []string{"get", "list", "watch"},
},
}...)
}

if pr.cfg.UsePSP {
rules = append(rules, rbacv1.PolicyRule{
APIGroups: []string{"policy"},
@@ -188,6 +200,30 @@
})
}

if pr.cfg.Tenant.MultiTenant() {
// These rules are used by policy-recommendation in a management cluster serving multiple tenants in order to appear to managed
// clusters as the expected serviceaccount. They're only needed when there are multiple tenants sharing the same
// management cluster.
rules = append(rules, []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"serviceaccounts"},
Verbs: []string{"impersonate"},
ResourceNames: []string{PolicyRecommendationName},
},
{
APIGroups: []string{""},
Resources: []string{"groups"},
Verbs: []string{"impersonate"},
ResourceNames: []string{
serviceaccount.AllServiceAccountsGroup,
"system:authenticated",
fmt.Sprintf("%s%s", serviceaccount.ServiceAccountGroupPrefix, PolicyRecommendationNamespace),
},
},
}...)
}

return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{
@@ -198,29 +234,54 @@ }
}

func (pr *policyRecommendationComponent) clusterRoleBinding() client.Object {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: PolicyRecommendationName,
return rcomponents.ClusterRoleBinding(PolicyRecommendationName, PolicyRecommendationName, PolicyRecommendationNamespace, pr.cfg.BindingNamespaces)
}

func (pr *policyRecommendationComponent) multiTenantManagedClustersAccess() []client.Object {
var objects []client.Object
objects = append(objects, &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{Name: PolicyRecommendationMultiTenantManagedClustersAccessClusterRoleName},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{"managedclusters"},
Verbs: []string{
// The Authentication Proxy in Voltron checks whether PolicyRecommendation (either using impersonation
// headers for the tigera-policy-recommendation service account in the tigera-policy-recommendation namespace,
// or the actual account in a single-tenant setup) can get a managed cluster before sending the
// request down the tunnel.
"get",
},
},
},
})

// In a single-tenant setup we create a cluster role binding for the tigera-policy-recommendation
// service account in the tigera-policy-recommendation namespace. In a multi-tenant setup, the
// PolicyRecommendation controller in the tenant's namespace impersonates the tigera-policy-recommendation
// service account from the tigera-policy-recommendation namespace.
objects = append(objects, &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{Name: PolicyRecommendationMultiTenantManagedClustersAccessClusterRoleName},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: PolicyRecommendationName,
Name: PolicyRecommendationMultiTenantManagedClustersAccessClusterRoleName,
},
Subjects: []rbacv1.Subject{
// Requests from policy recommendation to managed clusters are made using the tigera-policy-recommendation service account
// from the tigera-policy-recommendation namespace, regardless of tenancy mode (single-tenant or multi-tenant).
{
Kind: "ServiceAccount",
Name: PolicyRecommendationName,
Namespace: pr.cfg.Namespace,
Namespace: PolicyRecommendationNamespace,
},
},
}
}
})

return objects
}
func (pr *policyRecommendationComponent) podSecurityPolicy() *policyv1beta1.PodSecurityPolicy {
return podsecuritypolicy.NewBasePolicy(PolicyRecommendationPodSecurityPolicyName)
}
Expand All @@ -237,9 +298,13 @@ func (pr *policyRecommendationComponent) deployment() *appsv1.Deployment {
Name: "MULTI_CLUSTER_FORWARDING_CA",
Value: pr.cfg.TrustedBundle.MountPath(),
},
{
Name: "MULTI_CLUSTER_FORWARDING_ENDPOINT",
Value: ManagerService(pr.cfg.Tenant),
},
{
Name: "LINSEED_URL",
Value: relasticsearch.LinseedEndpoint(pr.SupportedOSType(), pr.cfg.ClusterDomain, ElasticsearchNamespace),
Value: relasticsearch.LinseedEndpoint(pr.SupportedOSType(), pr.cfg.ClusterDomain, LinseedNamespace(pr.cfg.Tenant)),
},
{
Name: "LINSEED_CA",
@@ -340,15 +405,15 @@ func (pr *policyRecommendationComponent) allowTigeraPolicyForPolicyRecommendatio
{
Action: v3.Allow,
Protocol: &networkpolicy.TCPProtocol,
Destination: networkpolicy.DefaultHelper().ManagerEntityRule(),
Destination: networkpolicy.Helper(pr.cfg.Tenant.MultiTenant(), pr.cfg.Namespace).ManagerEntityRule(),
},
}

if !pr.cfg.ManagedCluster {
egressRules = append(egressRules, v3.Rule{
Action: v3.Allow,
Protocol: &networkpolicy.TCPProtocol,
Destination: networkpolicy.DefaultHelper().LinseedEntityRule(),
Destination: networkpolicy.Helper(pr.cfg.Tenant.MultiTenant(), pr.cfg.Namespace).LinseedEntityRule(),
})
}

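
The impersonation rules and the tigera-policy-recommendation-managed-cluster-access binding exist so that, in a multi-tenant management cluster, the per-tenant policy recommendation controller presents itself to Voltron as the canonical tigera-policy-recommendation service account. A client-side sketch of what that impersonation could look like with client-go follows; this is assumed usage for illustration, not code from this commit, and it assumes the k8s.io/client-go/rest import.

// Sketch only: a per-tenant controller impersonating the canonical
// policy-recommendation identity when talking to the manager/Voltron
// endpoint. The username and groups mirror the ResourceNames granted by the
// new impersonation rules in the ClusterRole above.
func impersonationConfigSketch(managerEndpoint string) *rest.Config {
	return &rest.Config{
		Host: managerEndpoint, // e.g. the MULTI_CLUSTER_FORWARDING_ENDPOINT value
		Impersonate: rest.ImpersonationConfig{
			UserName: "system:serviceaccount:tigera-policy-recommendation:tigera-policy-recommendation",
			Groups: []string{
				"system:serviceaccounts",
				"system:serviceaccounts:tigera-policy-recommendation",
				"system:authenticated",
			},
		},
	}
}
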