Skip to content

Commit

Permalink
feat: enhance znode management with finalizer and logging improvements (
Browse files Browse the repository at this point in the history
#72)

This commit enhances the Znode controller's functionality by introducing a finalizer mechanism and improving logging for ZooKeeper client operations.

1. **Fix `listenerClass` Value in ZooKeeper Cluster Sample:**
   - Corrected the value of `listenerClass` in the ZooKeeper cluster sample configuration to ensure it matches the expected format or class name. This ensures that the listener is properly initialized and functions as intended.

2. **Znode Controller Finalizer:**
   - Implemented a finalizer mechanism in the Znode controller to handle the deletion of znodes from the ZooKeeper cluster when the corresponding Kubernetes resource is deleted. This ensures that all znodes are properly cleaned up, preventing orphaned data in the ZooKeeper cluster.

3. **Add Finalizer for Deleting Znode:**
   - Added a finalizer to the Znode controller to manage the lifecycle of znodes. When a Znode resource is deleted, the controller will perform necessary cleanup operations, such as deleting the znode from the ZooKeeper cluster, before removing the finalizer.

4. **Add Logs for Debugging ZooKeeper Client Operations:**
   - Introduced detailed logging statements within the ZooKeeper client operations to capture information about the operations being performed, such as creating, updating, and deleting znodes. These logs will aid in debugging and provide operational insights.
  • Loading branch information
lwpk110 authored Aug 1, 2024
1 parent e86435d commit 4542de8
Show file tree
Hide file tree
Showing 3 changed files with 106 additions and 51 deletions.
4 changes: 1 addition & 3 deletions config/samples/zookeeper_v1alpha1_zookeepercluster.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,7 @@ spec:
repository: docker.io/bitnami/zookeeper
tag: 3.9.1-debian-12-r15
clusterConfig:
service:
type: NodePort
port: 2181
listenerClass: external-unstable
server:
config:
resources:
Expand Down
93 changes: 47 additions & 46 deletions internal/znodecontroller/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,11 @@ import (
"context"
"fmt"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/finalizer"

zkv1alpha1 "github.com/zncdatadev/zookeeper-operator/api/v1alpha1"
"github.com/zncdatadev/zookeeper-operator/internal/common"
"github.com/zncdatadev/zookeeper-operator/internal/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
Expand All @@ -35,57 +35,28 @@ func NewZNodeReconciler(
}

// reconcile
func (z *ZNodeReconciler) reconcile(ctx context.Context) (ctrl.Result, error) {
cluster, err := z.getClusterInstance(ctx)
if err != nil {
return ctrl.Result{}, err
}
func (z *ZNodeReconciler) reconcile(ctx context.Context, cluster *zkv1alpha1.ZookeeperCluster) (ctrl.Result, string, error) {
// 1. create znode in zookeeper
znodePath := z.createZnodePath()
if err = z.createZookeeperZnode(znodePath, cluster); err != nil {
return ctrl.Result{}, err
znodeLogger.Info("create znode in zookeeper", "znode path", znodePath)
if err := z.createZookeeperZnode(znodePath, cluster); err != nil {
return ctrl.Result{}, "", err
}

// 2. create configmap in zookeeper to display zookeeper cluster info
znodeLogger.Info("create configmap for zookeeper discovery", "namaspace", z.instance.Namespace,
"name", z.instance.Name, "path", znodePath)
discovery := common.NewZookeeperDiscovery(z.scheme, cluster, z.client, z.instance, &znodePath)
res, err := discovery.ReconcileResource(ctx, common.NewMultiResourceBuilder(discovery))
if err != nil {
return ctrl.Result{}, err
znodeLogger.Error(err, "create configmap for zookeeper discovery error",
"namaspace", z.instance.Namespace, "discovery owner", z.instance.Name, "path", znodePath)
return ctrl.Result{}, "", err
}
if res.RequeueAfter > 0 {
return res, nil
return res, "", nil
}
return ctrl.Result{}, nil
}

// get cluster instance
func (z *ZNodeReconciler) getClusterInstance(ctx context.Context) (*zkv1alpha1.ZookeeperCluster, error) {
clusterRef := z.instance.Spec.ClusterRef
if clusterRef == nil {
return nil, fmt.Errorf("clusterRef is nil")
}
// deprecated: when cluster reference namespace is empty, use znode cr's namespace.
//var namespace string =
//if ns := clusterRef.Namespace; ns == "" {
// namespace = metav1.NamespaceDefault
//}
namespace := clusterRef.Namespace
if namespace == "" {
namespace = z.instance.Namespace
}

clusterInstance := &zkv1alpha1.ZookeeperCluster{
ObjectMeta: metav1.ObjectMeta{
Name: clusterRef.Name,
Namespace: namespace,
},
}
resourceClient := common.NewResourceClient(ctx, z.client, namespace)
err := resourceClient.Get(clusterInstance)
if err != nil {
return nil, err
}
return clusterInstance, nil
return ctrl.Result{}, znodePath, nil
}

// create znode Path
Expand All @@ -96,8 +67,8 @@ func (z *ZNodeReconciler) createZnodePath() string {

// create zookeeper znode
func (z *ZNodeReconciler) createZookeeperZnode(path string, cluster *zkv1alpha1.ZookeeperCluster) error {
svcDns := z.getClusterSvcUrl(cluster)
logger.Info("zookeeper cluster service client dns url", "dns", svcDns)
svcDns := getClusterSvcUrl(cluster)
znodeLogger.V(1).Info("zookeeper cluster service client dns url", "dns", svcDns)
// for local testing, you must add the zk service to your hosts, and then create port forwarding.
// example:
// 127.0.0.1 zookeepercluster-sample-cluster.default.svc.cluster.local
Expand All @@ -106,27 +77,57 @@ func (z *ZNodeReconciler) createZookeeperZnode(path string, cluster *zkv1alpha1.
return err
}
defer zkCli.Close()
znodeLogger.V(1).Info("check if znode exists", "dns", svcDns, "path", path)
exists, err := zkCli.Exists(path)
if err != nil {
znodeLogger.Error(err, "failed to check if znode exists", "namespace", z.instance.Namespace,
"name", z.instance.Name, "path", path)
"name", z.instance.Name, "zookeeper cluster svc dns", svcDns, "path", path)
return err
}
if exists {
znodeLogger.Info("znode already exists", "namespace", z.instance.Namespace,
znodeLogger.V(1).Info("znode already exists", "namespace", z.instance.Namespace,
"name", z.instance.Name, "zookeeper cluster svc dns", svcDns, "path", path)
return nil
}
znodeLogger.V(1).Info("create new znode in zookeeper cluster", "zk cluster svc dns", svcDns, "path", path)
err = zkCli.Create(path, []byte{})
if err != nil {
znodeLogger.Error(err, "failed to create znode", "namespace", z.instance.Namespace, "name",
z.instance.Name, "zookeeper cluster svc dns", svcDns, "path", path)
return err
}
return nil
}

// get cluster service url
func (z *ZNodeReconciler) getClusterSvcUrl(cluster *zkv1alpha1.ZookeeperCluster) string {
// getClusterSvcUrl builds the client connection address ("host:port") for the
// given ZooKeeper cluster's Kubernetes service, using the cluster-domain DNS
// form and the standard client port.
func getClusterSvcUrl(cluster *zkv1alpha1.ZookeeperCluster) string {
	// Service name is derived from the cluster CR name by project convention.
	svcHost := common.ClusterServiceName(cluster.Name)
	// Full in-cluster DNS name, e.g. "<svc>.<ns>.svc.<clusterDomain>".
	dns := util.CreateDnsAccess(svcHost, cluster.Namespace, cluster.Spec.ClusterConfig.ClusterDomain)
	return fmt.Sprintf("%s:%d", dns, zkv1alpha1.ClientPort)
}

// ZNodeDeleteFinalizer is the finalizer key registered on ZookeeperZnode
// resources so the backing znode is cleaned up before the CR disappears.
const ZNodeDeleteFinalizer = "znode.zncdata.dev/delete-znode"

// ZnodeDeleteFinalizer removes a chroot znode from its ZooKeeper cluster
// when the owning Kubernetes resource is being deleted.
type ZnodeDeleteFinalizer struct {
	Chroot    string                       // znode path to delete
	ZkCluster *zkv1alpha1.ZookeeperCluster // cluster that stores the znode
}

// Finalize dials the referenced ZooKeeper cluster and deletes the chroot
// znode. It satisfies the controller-runtime finalizer interface; the context
// and object arguments are unused because the receiver already carries
// everything needed for the cleanup.
func (z ZnodeDeleteFinalizer) Finalize(_ context.Context, _ client.Object) (finalizer.Result, error) {
	addr := getClusterSvcUrl(z.ZkCluster)
	// remove znode from zookeeper cluster
	zk, err := NewZkClient(addr)
	if err != nil {
		return finalizer.Result{}, err
	}
	defer zk.Close()

	znodeLogger.Info("delete znode from zookeeper", "znode path", z.Chroot)
	if err := zk.Delete(z.Chroot); err != nil {
		znodeLogger.Error(err, "delete znode from zookeeper error", "zookeeper cluster dns", addr,
			"znode path", z.Chroot)
		return finalizer.Result{}, err
	}
	znodeLogger.Info("delete znode from zookeeper success", "znode path", z.Chroot)
	return finalizer.Result{}, nil
}
60 changes: 58 additions & 2 deletions internal/znodecontroller/zookeeperznode_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,11 @@ package znodecontroller

import (
"context"
"fmt"
"github.com/zncdatadev/zookeeper-operator/internal/common"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/finalizer"
"time"

"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/runtime"
Expand Down Expand Up @@ -63,18 +68,69 @@ func (r *ZookeeperZnodeReconciler) Reconcile(ctx context.Context, req ctrl.Reque
r.Log.Info("Zookeeper-zNode resource not found. Ignoring since object must be deleted")
return ctrl.Result{}, nil
}

r.Log.Info("zookeeper-znode resource found", "Name", znode.Name)
zkCluster, err := r.getClusterInstance(znode, ctx)
if err != nil {
return ctrl.Result{RequeueAfter: time.Millisecond * 10000}, err
}
// reconcile order by "cluster -> role -> role-group -> resource"
result, err := NewZNodeReconciler(r.Scheme, znode, r.Client).reconcile(ctx)
result, chroot, err := NewZNodeReconciler(r.Scheme, znode, r.Client).reconcile(ctx, zkCluster)

//setup finalizer
if err := r.setupFinalizer(znode, zkCluster, ctx, chroot); err != nil {
return ctrl.Result{}, err
}

if err != nil {
return ctrl.Result{}, err
} else if result.RequeueAfter > 0 {
return result, nil
}
r.Log.Info("Reconcile successfully ", "Name", znode.Name)
return ctrl.Result{}, nil
}

// get cluster instance
// getClusterInstance resolves the ZookeeperCluster referenced by the given
// ZookeeperZnode. When the reference omits a namespace, the znode's own
// namespace is used as the lookup namespace.
//
// Returns the fetched cluster, or an error when the reference is missing or
// the cluster cannot be retrieved.
func (r *ZookeeperZnodeReconciler) getClusterInstance(znode *zkv1alpha1.ZookeeperZnode, ctx context.Context) (*zkv1alpha1.ZookeeperCluster, error) {
	clusterRef := znode.Spec.ClusterRef
	if clusterRef == nil {
		return nil, fmt.Errorf("clusterRef is nil")
	}
	// Fall back to the znode's namespace when the reference does not set one.
	namespace := clusterRef.Namespace
	if namespace == "" {
		namespace = znode.Namespace
	}

	clusterInstance := &zkv1alpha1.ZookeeperCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterRef.Name,
			Namespace: namespace,
		},
	}
	resourceClient := common.NewResourceClient(ctx, r.Client, namespace)
	if err := resourceClient.Get(clusterInstance); err != nil {
		// Wrap with the cluster coordinates so callers' logs pinpoint which
		// referenced cluster could not be fetched.
		return nil, fmt.Errorf("getting referenced ZookeeperCluster %s/%s: %w", namespace, clusterRef.Name, err)
	}
	return clusterInstance, nil
}

// setupFinalizer registers the znode-deletion finalizer on the ZookeeperZnode
// CR and runs the finalization machinery: on a live object the finalizer key
// is added; on a deleting object the znode cleanup runs and the key is
// removed. The CR is written back to the API server only when the finalizer
// list actually changed, avoiding a no-op Update (and possible conflict
// churn) on every reconcile.
func (r *ZookeeperZnodeReconciler) setupFinalizer(cr *zkv1alpha1.ZookeeperZnode, zkCluster *zkv1alpha1.ZookeeperCluster,
	ctx context.Context, chroot string) error {
	finalizers := finalizer.NewFinalizers()
	if err := finalizers.Register(ZNodeDeleteFinalizer, ZnodeDeleteFinalizer{Chroot: chroot, ZkCluster: zkCluster}); err != nil {
		return err
	}
	res, err := finalizers.Finalize(ctx, cr)
	if err != nil {
		return err
	}
	// Result.Updated reports whether Finalize mutated the object's finalizer
	// list; only then does the change need to be persisted.
	if res.Updated {
		if err := r.Update(ctx, cr); err != nil {
			return err
		}
	}
	return nil
}

// SetupWithManager sets up the controller with the Manager.
Expand Down

0 comments on commit 4542de8

Please sign in to comment.