Security Mode: E2E tests
co-authored-by: Alessandro Olivero <[email protected]>
cheina97 and aleoli committed Oct 24, 2023
1 parent 401b1d9 commit 7de6fd2
Showing 8 changed files with 69 additions and 33 deletions.
2 changes: 2 additions & 0 deletions cmd/liqonet/main.go
@@ -27,6 +27,7 @@ import (
discoveryv1alpha1 "github.com/liqotech/liqo/apis/discovery/v1alpha1"
netv1alpha1 "github.com/liqotech/liqo/apis/net/v1alpha1"
offloadingv1alpha1 "github.com/liqotech/liqo/apis/offloading/v1alpha1"
virtualkubeletv1alpha1 "github.com/liqotech/liqo/apis/virtualkubelet/v1alpha1"
liqoconst "github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/utils/restcfg"
)
@@ -49,6 +50,7 @@ func init() {
utilruntime.Must(discoveryv1alpha1.AddToScheme(scheme))
utilruntime.Must(netv1alpha1.AddToScheme(scheme))
utilruntime.Must(offloadingv1alpha1.AddToScheme(scheme))
utilruntime.Must(virtualkubeletv1alpha1.AddToScheme(scheme))
}

func main() {
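The new import and AddToScheme call register the virtualkubelet.liqo.io/v1alpha1 types (VirtualNode, VirtualNodeList) in the runtime scheme used by liqonet, so a controller-runtime client built on that scheme can decode them. A minimal standalone sketch of why this matters — the configuration loading and error handling here are assumptions for illustration, not taken from this commit:

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	virtualkubeletv1alpha1 "github.com/liqotech/liqo/apis/virtualkubelet/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// Without this registration, the List call below fails with an error like
	// "no kind is registered for the type v1alpha1.VirtualNodeList".
	utilruntime.Must(virtualkubeletv1alpha1.AddToScheme(scheme))

	cl, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	utilruntime.Must(err)

	nodes := virtualkubeletv1alpha1.VirtualNodeList{}
	utilruntime.Must(cl.List(context.Background(), &nodes))
}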
8 changes: 8 additions & 0 deletions deployments/liqo/files/liqo-gateway-ClusterRole.yaml
@@ -108,3 +108,11 @@ rules:
- get
- list
- watch
- apiGroups:
- virtualkubelet.liqo.io
resources:
- virtualnodes
verbs:
- get
- list
- watch
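The extra rule lets the gateway service account read VirtualNode resources; it is the YAML counterpart of the kubebuilder marker added to the reflected endpointslice controller further down. A hedged sketch of the call this rule authorizes — only the List call mirrors the controller code in this commit, the wrapper function is illustrative:

package sketch

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	virtualkubeletv1alpha1 "github.com/liqotech/liqo/apis/virtualkubelet/v1alpha1"
)

// listVirtualNodes issues the same List the controller performs while building
// endpoint information; without the get/list/watch rule above the API server
// rejects it with a "forbidden" error.
// +kubebuilder:rbac:groups=virtualkubelet.liqo.io,resources=virtualnodes,verbs=get;list;watch
func listVirtualNodes(ctx context.Context, cl client.Client) (*virtualkubeletv1alpha1.VirtualNodeList, error) {
	nodes := virtualkubeletv1alpha1.VirtualNodeList{}
	if err := cl.List(ctx, &nodes); err != nil {
		return nil, err
	}
	return &nodes, nil
}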
16 changes: 10 additions & 6 deletions internal/liqonet/tunnel-operator/offloaded_pod_controller.go
@@ -21,6 +21,7 @@ import (

"github.com/containernetworking/plugins/pkg/ns"
corev1 "k8s.io/api/core/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -75,14 +76,14 @@ func (r *OffloadedPodController) Reconcile(ctx context.Context, req ctrl.Request
return r.EnsureRulesForClustersForwarding(r.podsInfo, r.endpointslicesInfo, r.IPSHandler)
}
nsName := req.NamespacedName
klog.Infof("Reconcile Pod %q", nsName)
klog.V(3).Infof("Reconcile Pod %q", nsName)

pod := corev1.Pod{}
if err := r.Get(ctx, nsName, &pod); err != nil {
if client.IgnoreNotFound(err) == nil {
if apierror.IsNotFound(err) {
// Pod not found, podInfo object found: delete podInfo object
if value, ok := r.podsInfo.LoadAndDelete(nsName); ok {
klog.Infof("Pod %q not found: ensuring updated iptables rules", nsName)
klog.V(3).Infof("Pod %q not found: ensuring updated iptables rules", nsName)

// Soft delete object
podInfo := value.(liqoiptables.PodInfo)
@@ -96,7 +97,10 @@ func (r *OffloadedPodController) Reconcile(ctx context.Context, req ctrl.Request
// Hard delete object
r.podsInfo.Delete(nsName)
}

return ctrl.Result{}, nil
}

return ctrl.Result{}, err
}

@@ -109,22 +113,22 @@ func (r *OffloadedPodController) Reconcile(ctx context.Context, req ctrl.Request
// Check if the object is under deletion
if !pod.ObjectMeta.DeletionTimestamp.IsZero() {
// Pod under deletion: skip creation of iptables rules and return no error
klog.Infof("Pod %q under deletion: skipping iptables rules update", nsName)
klog.V(3).Infof("Pod %q under deletion: skipping iptables rules update", nsName)
return ctrl.Result{}, nil
}

// Check if the pod IP is set
if podInfo.PodIP == "" {
// Pod IP address not yet set: skip creation of iptables rules and return no error
klog.Infof("Pod %q IP address not yet set: skipping iptables rules update", nsName)
klog.V(3).Infof("Pod %q IP address not yet set: skipping iptables rules update", nsName)
return ctrl.Result{}, nil
}

// Store podInfo object
r.podsInfo.Store(nsName, podInfo)

// Ensure iptables rules
klog.Infof("Ensuring updated iptables rules")
klog.V(3).Infof("Ensuring updated iptables rules")
if err := r.gatewayNetns.Do(ensureIptablesRules); err != nil {
klog.Errorf("Error while ensuring iptables rules: %w", err)
return ctrl.Result{}, err
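Besides lowering the log verbosity, the controller now tests the Get error with apierror.IsNotFound instead of client.IgnoreNotFound(err) == nil. Inside the error branch the two predicates are equivalent; the new form simply states the intent directly. A small comparison sketch (the helper names are illustrative):

package sketch

import (
	apierror "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Both helpers return true exactly when err is a Kubernetes NotFound error:
// the first is the pattern this commit removes, the second the one it adds.
func notFoundViaIgnore(err error) bool {
	return err != nil && client.IgnoreNotFound(err) == nil
}

func notFoundViaIsNotFound(err error) bool {
	return apierror.IsNotFound(err)
}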
59 changes: 33 additions & 26 deletions internal/liqonet/tunnel-operator/reflected_endpointslice_controller.go
@@ -21,6 +21,7 @@ import (

"github.com/containernetworking/plugins/pkg/ns"
discoveryv1 "k8s.io/api/discovery/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -67,6 +68,7 @@ type ReflectedEndpointsliceController struct {
// +kubebuilder:rbac:groups=discovery.k8s.io,resources=endpointslices/endpoints/addresses,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch
// +kubebuilder:rbac:groups=offloading.liqo.io,resources=namespaceoffloadings,verbs=get;list;watch
// +kubebuilder:rbac:groups=virtualkubelet.liqo.io,resources=virtualnodes,verbs=get;list;watch

// NewReflectedEndpointsliceController instantiates and initializes the reflected endpointslice controller.
func NewReflectedEndpointsliceController(
@@ -100,14 +102,14 @@ func (r *ReflectedEndpointsliceController) Reconcile(ctx context.Context, req ct
return r.EnsureRulesForClustersForwarding(r.podsInfo, r.endpointslicesInfo, r.IPSHandler)
}
nsName := req.NamespacedName
klog.Infof("Reconcile Endpointslice %q", nsName)
klog.V(3).Infof("Reconcile Endpointslice %q", nsName)

endpointslice := discoveryv1.EndpointSlice{}
if err := r.Get(ctx, nsName, &endpointslice); err != nil {
if client.IgnoreNotFound(err) == nil {
if apierror.IsNotFound(err) {
// Endpointslice not found, endpointsliceInfo object found: delete endpointInfo objects.
if value, ok := r.endpointslicesInfo.LoadAndDelete(nsName); ok {
klog.Infof("Endpointslice %q not found: ensuring updated iptables rules", nsName)
klog.V(3).Infof("Endpointslice %q not found: ensuring updated iptables rules", nsName)

// Soft delete object
endpointsInfo := value.(map[string]liqoiptables.EndpointInfo)
@@ -124,43 +126,48 @@ func (r *ReflectedEndpointsliceController) Reconcile(ctx context.Context, req ct
// Hard delete object
r.endpointslicesInfo.Delete(nsName)
}

return ctrl.Result{}, nil
}
return ctrl.Result{}, err
}

// Check endpointslice's namespace offloading
nsOffloading, err := getters.GetOffloadingByNamespace(ctx, r.Client, endpointslice.Namespace)
if err != nil {
if client.IgnoreNotFound(err) == nil {
// Delete endpointInfo objects related to this endpointslice
if value, ok := r.endpointslicesInfo.LoadAndDelete(nsName); ok {
// Endpointslice not found, endpointsliceInfo object found: ensure iptables rules
klog.Infof("Endpointslice %q not found: ensuring updated iptables rules", nsName)
if apierror.IsNotFound(err) {
return ctrl.Result{}, nil
}

// Soft delete object
endpointsInfo := value.(map[string]liqoiptables.EndpointInfo)
for endpoint, endpointInfo := range endpointsInfo {
endpointInfo.Deleting = true
endpointsInfo[endpoint] = endpointInfo
}
r.endpointslicesInfo.Store(nsName, endpointsInfo)
// Delete endpointInfo objects related to this endpointslice
if value, ok := r.endpointslicesInfo.LoadAndDelete(nsName); ok {
// Endpointslice not found, endpointsliceInfo object found: ensure iptables rules
klog.V(3).Infof("Endpointslice %q not found: ensuring updated iptables rules", nsName)

if err := r.gatewayNetns.Do(ensureIptablesRules); err != nil {
return ctrl.Result{}, fmt.Errorf("error while ensuring iptables rules: %w", err)
}
// Soft delete object
endpointsInfo := value.(map[string]liqoiptables.EndpointInfo)
for endpoint, endpointInfo := range endpointsInfo {
endpointInfo.Deleting = true
endpointsInfo[endpoint] = endpointInfo
}
r.endpointslicesInfo.Store(nsName, endpointsInfo)

// Hard delete object
r.endpointslicesInfo.Delete(nsName)
if err := r.gatewayNetns.Do(ensureIptablesRules); err != nil {
return ctrl.Result{}, fmt.Errorf("error while ensuring iptables rules: %w", err)
}

// Hard delete object
r.endpointslicesInfo.Delete(nsName)
}

return ctrl.Result{}, err
}

clusterSelector := nsOffloading.Spec.ClusterSelector

nodes := virtualkubeletv1alpha1.VirtualNodeList{}
if err := r.List(ctx, &nodes); err != nil {
return ctrl.Result{}, fmt.Errorf("%w", err)
return ctrl.Result{}, err
}

// Build endpointInfo objects
@@ -175,7 +182,7 @@ func (r *ReflectedEndpointsliceController) Reconcile(ctx context.Context, req ct

matchClusterSelctor, err := nsoffctrl.MatchVirtualNodeSelectorTerms(ctx, r.Client, &nodes.Items[i], &clusterSelector)
if err != nil {
return ctrl.Result{}, fmt.Errorf("%w", err)
return ctrl.Result{}, err
}

if matchClusterSelctor {
Expand Down Expand Up @@ -218,7 +225,7 @@ func (r *ReflectedEndpointsliceController) Reconcile(ctx context.Context, req ct
r.endpointslicesInfo.Store(nsName, endpointsInfo)

// Ensure iptables rules
klog.Infof("Ensuring updated iptables rules")
klog.V(3).Infof("Ensuring updated iptables rules")
if err := r.gatewayNetns.Do(ensureIptablesRules); err != nil {
return ctrl.Result{}, fmt.Errorf("error while ensuring iptables rules: %w", err)
}
@@ -233,7 +240,7 @@ func (r *ReflectedEndpointsliceController) endpointsliceEnqueuer(ctx context.Con

// If gvk is found we log.
if len(gvks) != 0 {
klog.Infof("handling resource %q of type %q", klog.KObj(obj), gvks[0].String())
klog.V(4).Infof("handling resource %q of type %q", klog.KObj(obj), gvks[0].String())
}

endpointslices := discoveryv1.EndpointSliceList{}
@@ -243,7 +250,7 @@ func (r *ReflectedEndpointsliceController) endpointsliceEnqueuer(ctx context.Con
}

if len(endpointslices.Items) == 0 {
klog.Infof("no endpointslice found for resource %q", klog.KObj(obj))
klog.V(4).Infof("no endpointslice found for resource %q", klog.KObj(obj))
return []ctrl.Request{}
}

@@ -265,7 +272,7 @@ func (r *ReflectedEndpointsliceController) SetupWithManager(mgr ctrl.Manager) er
},
})
if err != nil {
return fmt.Errorf("%w", err)
return err
}

return ctrl.NewControllerManagedBy(mgr).
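The per-reconcile messages in both controllers move from klog.Infof to klog.V(3).Infof (and the enqueuer messages to V(4)), so they are emitted only when the component runs with a klog verbosity of at least that level. A minimal sketch of the gating behaviour — the flag wiring below is an assumption for illustration, not taken from the liqo deployment:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// klog registers its -v flag here; run with -v=3 (or higher) to see the
	// V(3) message, and with -v=4 to also see the V(4) one.
	klog.InitFlags(nil)
	flag.Parse()

	klog.Infof("always printed")
	klog.V(3).Infof("printed only at verbosity >= 3")
	klog.V(4).Infof("printed only at verbosity >= 4")
	klog.Flush()
}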
8 changes: 8 additions & 0 deletions test/e2e/cruise/basic_test.go
@@ -79,6 +79,10 @@ var _ = Describe("Liqo E2E", func() {
for index1 := range testContext.Clusters {
for index2 := range testContext.Clusters {
if index2 != index1 {
if testContext.SecurityMode == liqoconst.IntraClusterTrafficSegregationSecurityMode && index1 != 0 {
// this works only for pods offloaded from the cluster, not vice versa
continue
}
ConnectivityCheckTableEntries = append(ConnectivityCheckTableEntries,
Entry(fmt.Sprintf("Check Pod to Pod connectivity from cluster %v to cluster %v", index1+1, index2+1),
connectivityTestcase{
@@ -447,6 +451,10 @@ var _ = Describe("Liqo E2E", func() {
)

BeforeEach(func() {
if testContext.SecurityMode == liqoconst.IntraClusterTrafficSegregationSecurityMode {
Skip("Skip API server interaction test because it is not working with IntraClusterTrafficSegregationSecurityMode, waiting to fix it")
}

client, err := discovery.NewDiscoveryClientForConfig(testContext.Clusters[0].Config)
Expect(err).ToNot(HaveOccurred())
v, err = client.ServerVersion()
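The tests branch on liqoconst.IntraClusterTrafficSegregationSecurityMode, and setup.sh below defaults SECURITY_MODE to "FullPodToPod". A reconstructed sketch of what pkg/consts presumably exposes — the type name and the IntraClusterTrafficSegregation constant name come from this diff, while the exact string values and the FullPodToPod constant name are assumptions:

package consts

// SecurityModeType selects how Liqo filters cross-cluster traffic.
type SecurityModeType string

const (
	// FullPodToPodSecurityMode allows pod-to-pod traffic in both directions
	// (constant name and value assumed from the "FullPodToPod" default in setup.sh).
	FullPodToPodSecurityMode SecurityModeType = "FullPodToPod"
	// IntraClusterTrafficSegregationSecurityMode: per the test comment above,
	// connectivity works only for pods offloaded from the cluster, not vice versa
	// (string value assumed).
	IntraClusterTrafficSegregationSecurityMode SecurityModeType = "IntraClusterTrafficSegregation"
)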
4 changes: 3 additions & 1 deletion test/e2e/pipeline/installer/liqoctl/setup.sh
@@ -14,6 +14,7 @@
# LIQOCTL -> the path where liqoctl is stored
# KUBECTL -> the path where kubectl is stored
# POD_CIDR_OVERLAPPING -> the pod CIDR of the clusters is overlapping
# SECURITY_MODE -> the security mode to use
# CLUSTER_TEMPLATE_FILE -> the file where the cluster template is stored

set -e # Fail in case of error
@@ -52,6 +53,7 @@ function get_cluster_labels() {
}

LIQO_VERSION="${LIQO_VERSION:-$(git rev-parse HEAD)}"
SECURITY_MODE="${SECURITY_MODE:-"FullPodToPod"}"

export SERVICE_CIDR=10.100.0.0/16
export POD_CIDR=10.200.0.0/16
@@ -66,7 +68,7 @@
export POD_CIDR="10.$((i * 10)).0.0/16"
fi
COMMON_ARGS=(--cluster-name "liqo-${i}" --local-chart-path ./deployments/liqo
--version "${LIQO_VERSION}" --set controllerManager.config.enableResourceEnforcement=true)
--version "${LIQO_VERSION}" --set controllerManager.config.enableResourceEnforcement=true --set "networking.securityMode=${SECURITY_MODE}")
if [[ "${CLUSTER_LABELS}" != "" ]]; then
COMMON_ARGS=("${COMMON_ARGS[@]}" --cluster-labels "${CLUSTER_LABELS}")
fi
1 change: 1 addition & 0 deletions test/e2e/testconsts/consts.go
@@ -20,6 +20,7 @@ const (
ClusterNumberVarKey = "CLUSTER_NUMBER"
KubeconfigDirVarName = "KUBECONFIGDIR"
OverlappingCIDRsEnvVar = "POD_CIDR_OVERLAPPING"
SecurityModeEnvVar = "SECURITY_MODE"
)

// LiqoTestNamespaceLabels is a set of labels that has to be attached to test namespaces to simplify garbage collection.
4 changes: 4 additions & 0 deletions test/e2e/testutils/tester/tester.go
@@ -34,6 +34,7 @@ import (
offv1alpha1 "github.com/liqotech/liqo/apis/offloading/v1alpha1"
sharingv1alpha1 "github.com/liqotech/liqo/apis/sharing/v1alpha1"
virtualKubeletv1alpha1 "github.com/liqotech/liqo/apis/virtualkubelet/v1alpha1"
"github.com/liqotech/liqo/pkg/consts"
"github.com/liqotech/liqo/pkg/utils"
"github.com/liqotech/liqo/test/e2e/testconsts"
testutils "github.com/liqotech/liqo/test/e2e/testutils/util"
@@ -46,6 +47,7 @@
// ClustersNumber represents the number of available clusters
ClustersNumber int
OverlappingCIDRs bool
SecurityMode consts.SecurityModeType
}

// ClusterContext encapsulate all information and objects used to access a test cluster.
@@ -103,13 +105,15 @@ func createTester(ctx context.Context, ignoreClusterIDError bool) (*Tester, erro
TmpDir := testutils.GetEnvironmentVariableOrDie(testconsts.KubeconfigDirVarName)

overlappingCIDRsString := testutils.GetEnvironmentVariableOrDie(testconsts.OverlappingCIDRsEnvVar)
securityModeString := testutils.GetEnvironmentVariableOrDie(testconsts.SecurityModeEnvVar)

// Here is necessary to add the controller runtime clients.
scheme := getScheme()

tester = &Tester{
Namespace: namespace,
OverlappingCIDRs: strings.EqualFold(overlappingCIDRsString, "true"),
SecurityMode: consts.SecurityModeType(securityModeString),
}

tester.ClustersNumber, err = getClusterNumberFromEnv()
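With the new Tester field in place, individual specs can gate themselves on the configured mode, which is what basic_test.go does above. A small usage sketch (the helper name is illustrative):

package sketch

import (
	"github.com/liqotech/liqo/pkg/consts"
	"github.com/liqotech/liqo/test/e2e/testutils/tester"
)

// skipBidirectionalChecks reports whether checks that need pod-to-pod
// connectivity in both directions should be skipped for this test run.
func skipBidirectionalChecks(testContext *tester.Tester) bool {
	return testContext.SecurityMode == consts.IntraClusterTrafficSegregationSecurityMode
}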
