From affc89ac7c1ab67930a78dcf15b153883f804dbd Mon Sep 17 00:00:00 2001 From: Alexandre Guitton <10503351+erdrix@users.noreply.github.com> Date: Mon, 22 Aug 2022 12:27:50 +0200 Subject: [PATCH] Auto scaler (#139) * Added NifiNodeGroupAutoscaler with basic scaling strategies * fix typo, update CRDs * update changelog, add CRD documentation to site * Add missing finalizer * fix docs * Update api/v1alpha1/nifinodegroupautoscaler_types.go Co-authored-by: Alexandre Guitton <10503351+erdrix@users.noreply.github.com> * do not create HPA, centralize nodeId assignment, refactor scaling strategies, added CreationTime to NifiCluster.Status.NodeState, various PR feedback * added NodeConfig to autoscaler spec * Update docs * relocate nodeId assignment logic to strategy package since that's where it's used * Added NifiNodeGroupAutoscaler with basic scaling strategies * fix typo, update CRDs * update changelog, add CRD documentation to site * Add missing finalizer * fix docs * Update api/v1alpha1/nifinodegroupautoscaler_types.go Co-authored-by: Alexandre Guitton <10503351+erdrix@users.noreply.github.com> * do not create HPA, centralize nodeId assignment, refactor scaling strategies, added CreationTime to NifiCluster.Status.NodeState, various PR feedback * added NodeConfig to autoscaler spec * Update docs * relocate nodeId assignment logic to strategy package since that's where it's used * make node status creation time optional to avoid breaking pre-existing clusters * fixed LIFO downscale bug, added unit tests * revert minor change * update CRDs after merging master * update CRDs after merging master in * remove pods restart at scale up * rework creationTime to be node creationTime instead of pod creationTime & changes log level according to review * correct unit tests * allow to scale to 0 nodes * regenerate go sum * Bump to version 0.13.0 * Bump to version 0.13.0 Co-authored-by: Michael H Co-authored-by: michael81877 <86672176+michael81877@users.noreply.github.com> --- 
.gitignore | 4 +- CHANGELOG.md | 6 + PROJECT | 48 +- api/v1alpha1/common_types.go | 40 +- api/v1alpha1/nificluster_types.go | 54 +- api/v1alpha1/nificluster_types_test.go | 51 + api/v1alpha1/nifinodegroupautoscaler_types.go | 99 + api/v1alpha1/zz_generated.deepcopy.go | 157 +- .../nifi.konpyutaika.com_nificlusters.yaml | 17 + ...pyutaika.com_nifinodegroupautoscalers.yaml | 2713 +++++++++++++++++ config/crd/kustomization.yaml | 3 + ...injection_in_nifinodegroupautoscalers.yaml | 7 + .../webhook_in_nifinodegroupautoscalers.yaml | 16 + config/manager/kustomization.yaml | 3 +- config/manager/manager.yaml | 2 +- .../nifinodegroupautoscaler_editor_role.yaml | 24 + .../nifinodegroupautoscaler_viewer_role.yaml | 20 + config/rbac/role.yaml | 26 + .../keycloak-example/step-1/operator.yaml | 2 +- config/samples/kustomization.yaml | 1 + config/samples/nifi_v1alpha1_nificluster.yaml | 4 +- ...nifi_v1alpha1_nifinodegroupautoscaler.yaml | 32 + controllers/nifidataflow_controller.go | 2 +- .../nifinodegroupautoscaler_controller.go | 335 ++ controllers/nifiuser_controller.go | 1 + controllers/nifiusergroup_controller.go | 1 + controllers/suite_test.go | 25 +- go.mod | 1 + go.sum | 7 +- helm/nifikop/Chart.yaml | 4 +- helm/nifikop/README.md | 2 +- .../nifi.konpyutaika.com_nificlusters.yaml | 17 + ...pyutaika.com_nifinodegroupautoscalers.yaml | 2713 +++++++++++++++++ helm/nifikop/templates/role.yaml | 14 + helm/nifikop/values.yaml | 3 +- main.go | 16 +- pkg/autoscale/strategy.go | 141 + pkg/autoscale/strategy_test.go | 219 ++ pkg/common/common.go | 28 +- pkg/k8sutil/resource.go | 1 + pkg/k8sutil/status.go | 36 + pkg/resources/nifi/nifi.go | 54 +- pkg/resources/nifi/pod.go | 56 +- pkg/resources/nifi/pvc.go | 2 +- pkg/resources/templates/variables.go | 1 + pkg/util/nifi/common.go | 3 +- pkg/util/util.go | 25 + pkg/util/util_test.go | 39 +- site/docs/1_concepts/3_features.md | 14 +- site/docs/2_setup/1_getting_started.md | 4 +- .../1_customizable_install_with_helm.md | 2 +- 
.../1_nifi_cluster/5_node_state.md | 2 + .../7_nifi_nodegroup_autoscaler.md | 59 + site/website/sidebars.json | 8 +- .../1_concepts/1_introduction.md | 34 + .../1_concepts/2_design_principes.md | 62 + .../version-v0.13.0/1_concepts/3_features.md | 67 + .../version-v0.13.0/1_concepts/4_roadmap.md | 95 + .../2_setup/1_getting_started.md | 152 + .../2_setup/2_platform_setup/1_gke.md | 42 + .../2_setup/2_platform_setup/2_k3d.md | 26 + .../1_customizable_install_with_helm.md | 197 ++ .../1_nifi_cluster/1_nodes_configuration.md | 9 + .../1_nifi_cluster/2_cluster_scaling.md | 237 ++ .../3_tasks/1_nifi_cluster/3_external_dns.md | 9 + .../1_nifi_cluster/4_external_cluster.md | 93 + .../3_tasks/2_security/1_ssl.md | 159 + .../2_security/2_authentication/1_oidc.md | 42 + .../2_authorization/1_custom_authorizer.md | 83 + .../3_tasks/3_nifi_dataflow.md | 126 + .../3_tasks/4_nifi_user_group.md | 168 + .../4_examples/1_simple_nifi_cluster.md | 5 + .../1_nifi_cluster/1_nifi_cluster.md | 202 ++ .../1_nifi_cluster/2_read_only_config.md | 222 ++ .../1_nifi_cluster/3_node_config.md | 108 + .../5_references/1_nifi_cluster/4_node.md | 60 + .../1_nifi_cluster/5_node_state.md | 73 + .../1_nifi_cluster/6_listeners_config.md | 56 + .../7_external_service_config.md | 56 + .../5_references/2_nifi_user.md | 101 + .../5_references/3_nifi_registry_client.md | 42 + .../5_references/4_nifi_parameter_context.md | 86 + .../5_references/5_nifi_dataflow.md | 136 + .../5_references/6_nifi_usergroup.md | 55 + .../7_nifi_nodegroup_autoscaler.md | 59 + .../0_contribution_organization.md | 66 + .../6_contributing/1_developer_guide.md | 144 + .../6_contributing/2_reporting_bugs.md | 25 + .../6_contributing/3_credits.md | 11 + .../7_upgrade/1_v0.7.x_to_v0.8.0.md | 165 + .../version-v0.13.0-sidebars.json | 227 ++ site/website/versions.json | 1 + version/version.go | 2 +- 93 files changed, 10498 insertions(+), 169 deletions(-) create mode 100644 api/v1alpha1/nificluster_types_test.go create mode 100644 
api/v1alpha1/nifinodegroupautoscaler_types.go create mode 100644 config/crd/bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml create mode 100644 config/crd/patches/cainjection_in_nifinodegroupautoscalers.yaml create mode 100644 config/crd/patches/webhook_in_nifinodegroupautoscalers.yaml create mode 100644 config/rbac/nifinodegroupautoscaler_editor_role.yaml create mode 100644 config/rbac/nifinodegroupautoscaler_viewer_role.yaml create mode 100644 config/samples/nifi_v1alpha1_nifinodegroupautoscaler.yaml create mode 100644 controllers/nifinodegroupautoscaler_controller.go create mode 100644 helm/nifikop/crds/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml create mode 100644 pkg/autoscale/strategy.go create mode 100644 pkg/autoscale/strategy_test.go create mode 100644 site/docs/5_references/7_nifi_nodegroup_autoscaler.md create mode 100644 site/website/versioned_docs/version-v0.13.0/1_concepts/1_introduction.md create mode 100644 site/website/versioned_docs/version-v0.13.0/1_concepts/2_design_principes.md create mode 100644 site/website/versioned_docs/version-v0.13.0/1_concepts/3_features.md create mode 100644 site/website/versioned_docs/version-v0.13.0/1_concepts/4_roadmap.md create mode 100644 site/website/versioned_docs/version-v0.13.0/2_setup/1_getting_started.md create mode 100644 site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/1_gke.md create mode 100644 site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/2_k3d.md create mode 100644 site/website/versioned_docs/version-v0.13.0/2_setup/3_install/1_customizable_install_with_helm.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/3_external_dns.md create mode 100644 
site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/4_external_cluster.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/1_ssl.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authentication/1_oidc.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authorization/1_custom_authorizer.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/3_nifi_dataflow.md create mode 100644 site/website/versioned_docs/version-v0.13.0/3_tasks/4_nifi_user_group.md create mode 100644 site/website/versioned_docs/version-v0.13.0/4_examples/1_simple_nifi_cluster.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/1_nifi_cluster.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/2_read_only_config.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/3_node_config.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/4_node.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/5_node_state.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/6_listeners_config.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/7_external_service_config.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/2_nifi_user.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/3_nifi_registry_client.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/4_nifi_parameter_context.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/5_nifi_dataflow.md create mode 100644 site/website/versioned_docs/version-v0.13.0/5_references/6_nifi_usergroup.md create mode 100644 
site/website/versioned_docs/version-v0.13.0/5_references/7_nifi_nodegroup_autoscaler.md create mode 100644 site/website/versioned_docs/version-v0.13.0/6_contributing/0_contribution_organization.md create mode 100644 site/website/versioned_docs/version-v0.13.0/6_contributing/1_developer_guide.md create mode 100644 site/website/versioned_docs/version-v0.13.0/6_contributing/2_reporting_bugs.md create mode 100644 site/website/versioned_docs/version-v0.13.0/6_contributing/3_credits.md create mode 100644 site/website/versioned_docs/version-v0.13.0/7_upgrade/1_v0.7.x_to_v0.8.0.md create mode 100644 site/website/versioned_sidebars/version-v0.13.0-sidebars.json diff --git a/.gitignore b/.gitignore index dbe5334987..adba829194 100644 --- a/.gitignore +++ b/.gitignore @@ -125,4 +125,6 @@ testbin/* # editor and IDE paraphernalia -*~ \ No newline at end of file +*~ + +vendor/ \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 00ddb1992d..532a6ed03e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,12 @@ ### Removed +## v0.13.0 + +### Added + +- [PR #89](https://github.com/konpyutaika/nifikop/pull/89) - **[Operator/NifiNodeGroupAutoscaler]** Add NifiNodeGroupAutoscaler to automatically horizontally scale a NifiCluster resource via the Kubernetes HorizontalPodAutoscaler. + ## v0.12.0 ### Added diff --git a/PROJECT b/PROJECT index 04a7e29537..459de5f724 100644 --- a/PROJECT +++ b/PROJECT @@ -1,81 +1,61 @@ domain: konpyutaika.com -layout: go.kubebuilder.io/v3 +layout: +- go.kubebuilder.io/v3 +plugins: + manifests.sdk.operatorframework.io/v2: {} + scorecard.sdk.operatorframework.io/v2: {} projectName: nifikop repo: github.com/konpyutaika/nifikop resources: - api: crdVersion: v1 - # TODO(user): Uncomment the below line if this resource's CRD is namespace scoped, else delete it. - # namespaced: true - # TODO(user): Uncomment the below line if this resource implements a controller, else delete it. 
- # controller: true domain: konpyutaika.com group: nifi kind: NifiCluster - # TODO(user): Update the package path for your API if the below value is incorrect. path: github.com/konpyutaika/nifikop/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 - # TODO(user): Uncomment the below line if this resource's CRD is namespace scoped, else delete it. - # namespaced: true - # TODO(user): Uncomment the below line if this resource implements a controller, else delete it. - # controller: true domain: konpyutaika.com group: nifi kind: NifiUserGroup - # TODO(user): Update the package path for your API if the below value is incorrect. path: github.com/konpyutaika/nifikop/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 - # TODO(user): Uncomment the below line if this resource's CRD is namespace scoped, else delete it. - # namespaced: true - # TODO(user): Uncomment the below line if this resource implements a controller, else delete it. - # controller: true domain: konpyutaika.com group: nifi kind: NifiUser - # TODO(user): Update the package path for your API if the below value is incorrect. path: github.com/Okonpyutaika/nifikop/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 - # TODO(user): Uncomment the below line if this resource's CRD is namespace scoped, else delete it. - # namespaced: true - # TODO(user): Uncomment the below line if this resource implements a controller, else delete it. - # controller: true domain: konpyutaika.com group: nifi kind: NifiRegistryClient - # TODO(user): Update the package path for your API if the below value is incorrect. path: github.com/Okonpyutaika/nifikop/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 - # TODO(user): Uncomment the below line if this resource's CRD is namespace scoped, else delete it. - # namespaced: true - # TODO(user): Uncomment the below line if this resource implements a controller, else delete it. 
- # controller: true domain: konpyutaika.com group: nifi kind: NifiDataflow - # TODO(user): Update the package path for your API if the below value is incorrect. path: github.com/konpyutaika/nifikop/api/v1alpha1 version: v1alpha1 - api: crdVersion: v1 - # TODO(user): Uncomment the below line if this resource's CRD is namespace scoped, else delete it. - # namespaced: true - # TODO(user): Uncomment the below line if this resource implements a controller, else delete it. - # controller: true domain: konpyutaika.com group: nifi kind: NifiParameterContext - # TODO(user): Update the package path for your API if the below value is incorrect. + path: github.com/konpyutaika/nifikop/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: konpyutaika.com + group: nifi + kind: NifiNodeGroupAutoscaler path: github.com/konpyutaika/nifikop/api/v1alpha1 version: v1alpha1 version: "3" -plugins: - manifests.sdk.operatorframework.io/v2: {} - scorecard.sdk.operatorframework.io/v2: {} diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 79308af894..1903632983 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -2,6 +2,8 @@ package v1alpha1 import ( "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // DataflowState defines the state of a NifiDataflow @@ -25,10 +27,22 @@ type ActionStep string // ClusterState holds info about the cluster state type ClusterState string +// NodeGroupAutoscalerState holds info autoscaler state +type NodeGroupAutoscalerState string + +// ClusterReplicas holds info about the current number of replicas in the cluster +type ClusterReplicas int32 + +// ClusterReplicaSelector holds info about the pod selector for cluster replicas +type ClusterReplicaSelector string + +// ClusterScalingStrategy holds info about how a cluster should be scaled +type ClusterScalingStrategy string + // ConfigurationState holds info about the configuration state type 
ConfigurationState string -// InitClusterNode holds info about if the node was part of the init cluster setup +// InitClusterNode holds info about if the node was part of the init cluster setup type InitClusterNode bool // PKIBackend represents an interface implementing the PKIManager @@ -325,6 +339,12 @@ type NodeState struct { InitClusterNode InitClusterNode `json:"initClusterNode"` // PodIsReady whether or not the associated pod is ready PodIsReady bool `json:"podIsReady"` + // CreationTime is the time at which this node was created. This must be sortable. + // +optional + CreationTime *metav1.Time `json:"creationTime,omitempty"` + // LastUpdatedTime is the last time at which this node was updated. This must be sortable. + // +optional + LastUpdatedTime metav1.Time `json:"lastUpdatedTime,omitempty"` } // RackAwarenessState holds info about rack awareness status @@ -392,6 +412,24 @@ const ( NotInitClusterNode InitClusterNode = false ) +const ( + // AutoscalerStateOutOfSync describes the status of a NifiNodeGroupAutoscaler as out of sync + AutoscalerStateOutOfSync NodeGroupAutoscalerState = "OutOfSync" + // AutoscalerStateInSync describes the status of a NifiNodeGroupAutoscaler as in sync + AutoscalerStateInSync NodeGroupAutoscalerState = "InSync" + + // upscale strategy representing 'Scale > Disconnect the nodes > Offload data > Reconnect the node' strategy + GracefulClusterUpscaleStrategy ClusterScalingStrategy = "graceful" + // simply add a node to the cluster and nothing else + SimpleClusterUpscaleStrategy ClusterScalingStrategy = "simple" + // downscale strategy to remove the last node added + LIFOClusterDownscaleStrategy ClusterScalingStrategy = "lifo" + // downscale strategy avoiding primary/coordinator nodes + NonPrimaryClusterDownscaleStrategy ClusterScalingStrategy = "nonprimary" + // downscale strategy targeting nodes which are least busy in terms of # flowfiles in queues + LeastBusyClusterDownscaleStrategy ClusterScalingStrategy = "leastbusy" +) + func 
ClusterRefsEquals(clusterRefs []ClusterReference) bool { c1 := clusterRefs[0] name := c1.Name diff --git a/api/v1alpha1/nificluster_types.go b/api/v1alpha1/nificluster_types.go index cec5db1439..46580f16a9 100644 --- a/api/v1alpha1/nificluster_types.go +++ b/api/v1alpha1/nificluster_types.go @@ -2,6 +2,8 @@ package v1alpha1 import ( "fmt" + "sort" + "strconv" "strings" cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1" @@ -15,7 +17,7 @@ const ( HttpListenerType = "http" HttpsListenerType = "https" S2sListenerType = "s2s" - prometheusListenerType = "prometheus" + PrometheusListenerType = "prometheus" ) // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! @@ -75,7 +77,7 @@ type NifiClusterSpec struct { // NodeUserIdentityTemplate specifies the template to be used when naming the node user identity (e.g. node-%d-mysuffix) NodeUserIdentityTemplate *string `json:"nodeUserIdentityTemplate,omitempty"` // all node requires an image, unique id, and storageConfigs settings - Nodes []Node `json:"nodes"` + Nodes []Node `json:"nodes" patchStrategy:"merge" patchMergeKey:"id"` // Defines the configuration for PodDisruptionBudget DisruptionBudget DisruptionBudget `json:"disruptionBudget,omitempty"` // LdapConfiguration specifies the configuration if you want to use LDAP @@ -161,6 +163,9 @@ type Node struct { ReadOnlyConfig *ReadOnlyConfig `json:"readOnlyConfig,omitempty"` // node configuration NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` + // Labels are used to distinguish nodes from one another. They are also used by NifiNodeGroupAutoscaler + // to be automatically scaled. 
See NifiNodeGroupAutoscaler.Spec.NodeLabelsSelector + Labels map[string]string `json:"labels,omitempty"` } type ReadOnlyConfig struct { @@ -726,7 +731,7 @@ func (nProperties NifiProperties) GetAuthorizer() string { func (nSpec *NifiClusterSpec) GetMetricPort() *int { for _, iListener := range nSpec.ListenersConfig.InternalListeners { - if iListener.Type == prometheusListenerType { + if iListener.Type == PrometheusListenerType { val := int(iListener.ContainerPort) return &val } @@ -804,3 +809,46 @@ func (cluster NifiCluster) IsReady() bool { func (cluster *NifiCluster) Id() string { return cluster.Name } + +type Pair struct { + Key string + Value metav1.Time +} +type PairList []Pair + +func (p PairList) Len() int { return len(p) } +func (p PairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p PairList) Less(i, j int) bool { return p[i].Value.Before(&p[j].Value) } + +// Order the nodes in the cluster by the time they were created. The list will be in ascending order. +// Older nodes will be in the beginning of the list, newer nodes at the end. +// Nodes for Clusters that existed prior to this feature (v0.11.0+) will not have a creationTime. In this case, +// LIFO will not be able to reliably determine the oldest node. A rolling restart of nodes in the cluster will +// resolve this issue going forward. +func (cluster *NifiCluster) GetCreationTimeOrderedNodes() []Node { + nodeIdCreationPairs := PairList{} + + for k, v := range cluster.Status.NodesState { + nodeIdCreationPairs = append(nodeIdCreationPairs, Pair{k, *v.CreationTime}) + } + + // nodeIdCreationPairs is now sorted by creation time in ascending order. 
+ sort.Sort(nodeIdCreationPairs) + + nodesMap := NodesToIdMap(cluster.Spec.Nodes) + timeOrderedNodes := []Node{} + + for _, pair := range nodeIdCreationPairs { + id, _ := strconv.Atoi(pair.Key) + timeOrderedNodes = append(timeOrderedNodes, nodesMap[int32(id)]) + } + return timeOrderedNodes +} + +func NodesToIdMap(nodes []Node) (nodeMap map[int32]Node) { + nodeMap = make(map[int32]Node) + for _, node := range nodes { + nodeMap[node.Id] = node + } + return +} diff --git a/api/v1alpha1/nificluster_types_test.go b/api/v1alpha1/nificluster_types_test.go new file mode 100644 index 0000000000..ff5a386c9b --- /dev/null +++ b/api/v1alpha1/nificluster_types_test.go @@ -0,0 +1,51 @@ +package v1alpha1 + +import ( + "testing" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGetCreationTimeOrderedNodes(t *testing.T) { + time1 := v1.NewTime(time.Now().UTC().Add(time.Duration(5) * time.Hour)) + time2 := v1.NewTime(time.Now().UTC().Add(time.Duration(10) * time.Hour)) + time3 := v1.NewTime(time.Now().UTC().Add(time.Duration(15) * time.Hour)) + time4 := v1.NewTime(time.Now().UTC().Add(time.Duration(20) * time.Hour)) + + cluster := &NifiCluster{ + Spec: NifiClusterSpec{ + Nodes: []Node{ + {Id: 2, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 3, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 4, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 5, NodeConfigGroup: "other-group", Labels: map[string]string{"other_group": "true"}}, + }, + }, + Status: NifiClusterStatus{ + NodesState: map[string]NodeState{ + "2": { + CreationTime: &time1, + }, + "3": { + CreationTime: &time3, + }, + "4": { + CreationTime: &time2, + }, + "5": { + CreationTime: &time4, + }, + }, + }, + } + + nodeList := cluster.GetCreationTimeOrderedNodes() + + if len(nodeList) != 4 { + t.Errorf("Incorrect node list: %v+", nodeList) + } + if nodeList[0].Id != 2 || nodeList[1].Id != 4 || 
nodeList[2].Id != 3 || nodeList[3].Id != 5 { + t.Errorf("Incorrect node list: %v+", nodeList) + } +} diff --git a/api/v1alpha1/nifinodegroupautoscaler_types.go b/api/v1alpha1/nifinodegroupautoscaler_types.go new file mode 100644 index 0000000000..740b13f48f --- /dev/null +++ b/api/v1alpha1/nifinodegroupautoscaler_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2020. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NifiNodeGroupAutoscalerSpec defines the desired state of NifiNodeGroupAutoscaler +type NifiNodeGroupAutoscalerSpec struct { + // contains the reference to the NifiCluster with the one the dataflow is linked. + ClusterRef ClusterReference `json:"clusterRef"` + // reference to the nodeConfigGroup that will be set for nodes that are managed and autoscaled + // This Id is used to compute the names of some Kubernetes resources, so it must be a safe value. + // +kubebuilder:validation:Pattern:="[a-z0-9]([-a-z0-9]*[a-z0-9])?" + // +kubebuilder:validation:MaxLength:=63 + NodeConfigGroupId string `json:"nodeConfigGroupId"` + // A label selector used to identify & manage Node objects in the referenced NifiCluster. Any node matching this + // selector will be managed by this autoscaler. Even if that node was previously statically defined. 
+ NodeLabelsSelector *metav1.LabelSelector `json:"nodeLabelsSelector"` + // the node readOnlyConfig for each node in the node group + // +optional + ReadOnlyConfig *ReadOnlyConfig `json:"readOnlyConfig,omitempty"` + // the nodeConfig to use for each node in the node group. This will be merged with and is preferred to the configured + // nodeConfigGroupId + // +optional + NodeConfig *NodeConfig `json:"nodeConfig,omitempty"` + // current number of replicas expected for the node config group + // +kubebuilder:default:=0 + // +optional + Replicas int32 `json:"replicas"` + // The strategy to use when scaling up the nifi cluster + // +kubebuilder:validation:Enum={"graceful","simple"} + UpscaleStrategy ClusterScalingStrategy `json:"upscaleStrategy,omitempty"` + // The strategy to use when scaling down the nifi cluster + // +kubebuilder:validation:Enum={"lifo","nonprimary","leastbusy"} + DownscaleStrategy ClusterScalingStrategy `json:"downscaleStrategy,omitempty"` +} + +// NifiNodeGroupAutoscalerStatus defines the observed state of NifiNodeGroupAutoscaler +type NifiNodeGroupAutoscalerStatus struct { + // The state of this autoscaler + State NodeGroupAutoscalerState `json:"state"` + // the current number of replicas in this cluster + Replicas ClusterReplicas `json:"replicas"` + // label selectors for cluster child pods. 
HPA uses this to identify pod replicas + Selector ClusterReplicaSelector `json:"selector"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector + +// NifiNodeGroupAutoscaler is the Schema for the nifinodegroupautoscalers API +type NifiNodeGroupAutoscaler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NifiNodeGroupAutoscalerSpec `json:"spec,omitempty"` + Status NifiNodeGroupAutoscalerStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// NifiNodeGroupAutoscalerList contains a list of NifiNodeGroupAutoscaler +type NifiNodeGroupAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NifiNodeGroupAutoscaler `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NifiNodeGroupAutoscaler{}, &NifiNodeGroupAutoscalerList{}) +} + +func (aSpec *NifiNodeGroupAutoscalerSpec) NifiNodeGroupSelectorAsMap() (map[string]string, error) { + labels, err := metav1.LabelSelectorAsMap(aSpec.NodeLabelsSelector) + if err != nil { + return nil, err + } + return labels, nil +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0cf58fb4e6..b9472f6214 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -24,6 +24,7 @@ package v1alpha1 import ( metav1 "github.com/jetstack/cert-manager/pkg/apis/meta/v1" "k8s.io/api/core/v1" + apismetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -548,7 +549,7 @@ func (in *NifiClusterStatus) DeepCopyInto(out *NifiClusterStatus) { in, out := &in.NodesState, &out.NodesState *out = make(map[string]NodeState, len(*in)) for key, val := range *in { - (*out)[key] = val + (*out)[key] = *val.DeepCopy() } } out.RollingUpgrade = in.RollingUpgrade @@ -705,6 +706,111 @@ 
func (in *NifiDataflowStatus) DeepCopy() *NifiDataflowStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiNodeGroupAutoscaler) DeepCopyInto(out *NifiNodeGroupAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiNodeGroupAutoscaler. +func (in *NifiNodeGroupAutoscaler) DeepCopy() *NifiNodeGroupAutoscaler { + if in == nil { + return nil + } + out := new(NifiNodeGroupAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NifiNodeGroupAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiNodeGroupAutoscalerList) DeepCopyInto(out *NifiNodeGroupAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NifiNodeGroupAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiNodeGroupAutoscalerList. +func (in *NifiNodeGroupAutoscalerList) DeepCopy() *NifiNodeGroupAutoscalerList { + if in == nil { + return nil + } + out := new(NifiNodeGroupAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NifiNodeGroupAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiNodeGroupAutoscalerSpec) DeepCopyInto(out *NifiNodeGroupAutoscalerSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.NodeLabelsSelector != nil { + in, out := &in.NodeLabelsSelector, &out.NodeLabelsSelector + *out = new(apismetav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ReadOnlyConfig != nil { + in, out := &in.ReadOnlyConfig, &out.ReadOnlyConfig + *out = new(ReadOnlyConfig) + (*in).DeepCopyInto(*out) + } + if in.NodeConfig != nil { + in, out := &in.NodeConfig, &out.NodeConfig + *out = new(NodeConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiNodeGroupAutoscalerSpec. +func (in *NifiNodeGroupAutoscalerSpec) DeepCopy() *NifiNodeGroupAutoscalerSpec { + if in == nil { + return nil + } + out := new(NifiNodeGroupAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiNodeGroupAutoscalerStatus) DeepCopyInto(out *NifiNodeGroupAutoscalerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiNodeGroupAutoscalerStatus. +func (in *NifiNodeGroupAutoscalerStatus) DeepCopy() *NifiNodeGroupAutoscalerStatus { + if in == nil { + return nil + } + out := new(NifiNodeGroupAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NifiParameterContext) DeepCopyInto(out *NifiParameterContext) { *out = *in @@ -1155,6 +1261,13 @@ func (in *Node) DeepCopyInto(out *Node) { *out = new(NodeConfig) (*in).DeepCopyInto(*out) } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. @@ -1257,6 +1370,11 @@ func (in *NodeConfig) DeepCopy() *NodeConfig { func (in *NodeState) DeepCopyInto(out *NodeState) { *out = *in out.GracefulActionState = in.GracefulActionState + if in.CreationTime != nil { + in, out := &in.CreationTime, &out.CreationTime + *out = (*in).DeepCopy() + } + in.LastUpdatedTime.DeepCopyInto(&out.LastUpdatedTime) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeState. @@ -1269,6 +1387,43 @@ func (in *NodeState) DeepCopy() *NodeState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pair) DeepCopyInto(out *Pair) { + *out = *in + in.Value.DeepCopyInto(&out.Value) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pair. +func (in *Pair) DeepCopy() *Pair { + if in == nil { + return nil + } + out := new(Pair) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PairList) DeepCopyInto(out *PairList) { + { + in := &in + *out = make(PairList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PairList. 
+func (in PairList) DeepCopy() PairList { + if in == nil { + return nil + } + out := new(PairList) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Parameter) DeepCopyInto(out *Parameter) { *out = *in diff --git a/config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml b/config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml index 592580fdad..cabf310a6b 100644 --- a/config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml +++ b/config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml @@ -3788,6 +3788,13 @@ spec: description: Unique Node id format: int32 type: integer + labels: + additionalProperties: + type: string + description: Labels are used to distinguish nodes from one another. + They are also used by NifiNodeGroupAutoscaler to be automatically + scaled. See NifiNodeGroupAutoscaler.Spec.NodeLabelsSelector + type: object nodeConfig: description: node configuration properties: @@ -8436,6 +8443,11 @@ spec: configurationState: description: ConfigurationState holds info about the config type: string + creationTime: + description: CreationTime is the time at which this node was + created. This must be sortable. + format: date-time + type: string gracefulActionState: description: GracefulActionState holds info about nifi cluster action status @@ -8464,6 +8476,11 @@ spec: description: InitClusterNode contains if this nodes was part of the initial cluster type: boolean + lastUpdatedTime: + description: LastUpdatedTime is the last time at which this + node was updated. This must be sortable. 
+ format: date-time + type: string podIsReady: description: PodIsReady whether or not the associated pod is ready diff --git a/config/crd/bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml b/config/crd/bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml new file mode 100644 index 0000000000..165582e003 --- /dev/null +++ b/config/crd/bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml @@ -0,0 +1,2713 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: nifinodegroupautoscalers.nifi.konpyutaika.com +spec: + group: nifi.konpyutaika.com + names: + kind: NifiNodeGroupAutoscaler + listKind: NifiNodeGroupAutoscalerList + plural: nifinodegroupautoscalers + singular: nifinodegroupautoscaler + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NifiNodeGroupAutoscaler is the Schema for the nifinodegroupautoscalers + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NifiNodeGroupAutoscalerSpec defines the desired state of + NifiNodeGroupAutoscaler + properties: + clusterRef: + description: contains the reference to the NifiCluster with the one + the dataflow is linked. 
+ properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + downscaleStrategy: + description: The strategy to use when scaling down the nifi cluster + enum: + - lifo + - nonprimary + - leastbusy + type: string + nodeConfig: + description: the nodeConfig to use for each node in the node group. + This will be merged with and is preferred to the configured nodeConfigGroupId + properties: + externalVolumeConfigs: + description: externalVolumeConfigs specifies a list of volume + to mount into the main container. + items: + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property + empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the + default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read + Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob + disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: the name of secret that contains Azure + Storage Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of + Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key + ring for User, default is /etc/ceph/user.secret More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the + authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached + and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value + between 0000 and 0777 or a decimal value between 0 + and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be + projected into the volume as a file whose name is + the key and content is the value. If specified, the + listed keys will be projected into the specified paths, + and unlisted keys will not be present. 
If a key is + specified which is not present in the ConfigMap, the + volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set + permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict with + other options that affect the file mode, like + fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: The relative path of the file to + map the key to. May not be an absolute path. + May not contain the path element '..'. May not + start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys + must be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: Driver is the name of the CSI driver that + handles this volume. Consult with your admin for the + correct name as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed + to the associated CSI driver which will determine + the default filesystem to apply. 
+ type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to + the secret object containing sensitive information + to pass to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the + secret object contains more than one secret, all secret + references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for + the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific + properties that are passed to the CSI driver. Consult + your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default. Must be a Optional: mode bits used + to set permissions on created files by default. Must + be an octal value between 0000 and 0777 or a decimal + value between 0 and 511. YAML accepts both octal and + decimal values, JSON requires decimal values for mode + bits. Defaults to 0644. Directories within the path + are not affected by this setting. This might be in + conflict with other options that affect the file mode, + like fsGroup, and the result can be other mode bits + set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set + permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict with + other options that affect the file mode, like + fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory + that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back + this directory. The default is "" which means to use + the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required + for this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all + containers in a pod. The default is nil which means + that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is + tied to the pod that defines it - it will be created before + the pod starts, and deleted when the pod is removed. 
\n + Use this if: a) the volume is only needed while the pod + runs, b) features of normal volumes like restoring from + snapshot or capacity tracking are needed, c) the storage + driver is specified through a storage class, and d) the + storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for + more information on the connection between this volume + type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that persist + for longer than the lifecycle of an individual pod. \n + Use CSI for light-weight local ephemeral volumes if the + CSI driver is meant to be used that way - see the documentation + of the driver for more information. \n A pod can use both + types of ephemeral volumes and persistent volumes at the + same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC + to provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the + PVC will be deleted together with the pod. The name + of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` + array entry. Pod validation will reject the pod if + the concatenated name is not valid for a PVC (for + example, too long). \n An existing PVC with that name + that is not owned by the pod will *not* be used for + the pod to avoid using an unrelated volume by mistake. + Starting the pod is then blocked until the unrelated + PVC is removed. If such a pre-created PVC is meant + to be used by the pod, the PVC has to updated with + an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may + be useful when manually reconstructing a broken cluster. + \n This field is read-only and no changes will be + made by Kubernetes to the PVC after it has been created. + \n Required, must not be nil." 
+ properties: + metadata: + description: May contain labels and annotations + that will be copied into the PVC when creating + it. No other fields are allowed and will be rejected + during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the + PVC that gets created from this template. The + same fields as in a PersistentVolumeClaim are + also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired + access modes the volume should have. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object + (snapshot.storage.k8s.io/VolumeSnapshot) * + An existing PVC (PersistentVolumeClaim) If + the provisioner or an external controller + can support the specified data source, it + will create a new volume based on the contents + of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will always + have the same contents as the DataSourceRef + field.' + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. If APIGroup + is not specified, the specified Kind must + be in the core API group. For any other + third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which + to populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) + or a PersistentVolumeClaim object. 
When this + field is specified, volume binding will only + succeed if the type of the specified object + matches some installed volume populator or + dynamic provisioner. This field will replace + the functionality of the DataSource field + and as such if both fields are non-empty, + they must have the same value. For backwards + compatibility, both fields (DataSource and + DataSourceRef) will be set to the same value + automatically if one of them is empty and + the other is non-empty. There are two important + differences between DataSource and DataSourceRef: + * While DataSource only allows two specific + types of objects, DataSourceRef allows any + non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed + values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed + value is specified. (Alpha) Using this field + requires the AnyVolumeDataSource feature gate + to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. If APIGroup + is not specified, the specified Kind must + be in the core API group. For any other + third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum + resources the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than + previous value but must still be higher than + capacity recorded in the status field of the + claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. + If Requests is omitted for a container, + it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. 
This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of + volume is required by the claim. Value of + Filesystem is implied when not included in + claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs + and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use + for this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the + secret object containing sensitive information to + pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the + plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached + to a kubelet's host machine. 
This depends on the Flocker + control service being running + properties: + datasetName: + description: Name of the dataset stored as metadata + -> name on the dataset for Flocker should be considered + as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then + exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property + empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. + Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an + InitContainer that clones the repo using git, then mount + the EmptyDir into the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain + or start with '..'. If '.' is supplied, the volume + directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on + the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More + info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs + volume to be mounted with read-only permissions. Defaults + to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or + directory on the host machine that is directly exposed + to the container. 
This is generally used for system agents + or other privileged things that are allowed to see the + host machine. Most containers will NOT need this. More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host + directory mounts and who can/can not mount host directories + as read/write.' + properties: + path: + description: 'Path of the directory on the host. If + the path is a symlink, it will follow the link to + the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that + is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new + iSCSI interface : will + be created for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI + transport. 
Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is + either an IP or ip_addr:port if the port is other + than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + nfs: + description: 'NFS represents an NFS mount on the host that + shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export + to be mounted with read-only permissions. Defaults + to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of + the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to + mount Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. Directories within the + path are not affected by this setting. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value + pair in the Data field of the referenced + ConfigMap will be projected into the volume + as a file whose name is the key and content + is the value. If specified, the listed keys + will be projected into the specified paths, + and unlisted keys will not be present. If + a key is specified which is not present + in the ConfigMap, the volume setup will + error unless it is marked optional. Paths + must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used + to set permissions on this file. Must + be an octal value between 0000 and + 0777 or a decimal value between 0 + and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. 
If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, + like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the + file to map the key to. May not be + an absolute path. May not contain + the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used + to set permissions on this file, must + be an octal value between 0000 and + 0777 or a decimal value between 0 + and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, + like fsGroup, and the result can be + other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of + the container: only resources limits + and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data + to project + properties: + items: + description: If unspecified, each key-value + pair in the Data field of the referenced + Secret will be projected into the volume + as a file whose name is the key and content + is the value. If specified, the listed keys + will be projected into the specified paths, + and unlisted keys will not be present. If + a key is specified which is not present + in the Secret, the volume setup will error + unless it is marked optional. Paths must + be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used + to set permissions on this file. 
Must + be an octal value between 0000 and + 0777 or a decimal value between 0 + and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, + like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the + file to map the key to. May not be + an absolute path. May not contain + the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must + identify itself with an identifier specified + in the audience of the token, and otherwise + should reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, + the kubelet volume plugin will proactively + rotate the service account token. The kubelet + will start trying to rotate the token if + the token is older than 80 percent of its + time to live or if the token is older than + 24 hours.Defaults to 1 hour and must be + at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to + the mount point of the file to project the + token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is + no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults + to false. + type: boolean + registry: + description: Registry represents a single or multiple + Quobyte Registry services specified as a string as + host:port pair (multiple entries are separated with + commas) which acts as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume + in the Backend Used with dynamically provisioned Quobyte + volumes, value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to + serivceaccount user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount + on the host that shares a pod''s lifetime. More info: + https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain + for the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for + ScaleIO user and other sensitive information. If this + is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with + the protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in + the ScaleIO system that is associated with this volume + source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value + between 0000 and 0777 or a decimal value between 0 + and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the Secret, the volume setup will + error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set + permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict with + other options that affect the file mode, like + fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: The relative path of the file to + map the key to. May not be an absolute path. + May not contain the path element '..'. May not + start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys + must be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Implicitly inferred to be "ext4" if + unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for + obtaining the StorageOS API credentials. If not specified, + default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of + the StorageOS volume. Volume names are only unique + within a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of + the volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows + the Kubernetes name scoping to be mirrored within + StorageOS for tighter integration. Set VolumeName + to any name to override the default behaviour. Set + to "default" if you are not using namespaces within + StorageOS. Namespaces that do not pre-exist within + StorageOS will be created. + type: string + type: object + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) + profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) + profile name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - mountPath + - name + type: object + type: array + fsGroup: + description: FSGroup define the id of the group for each volumes + in Nifi image + format: int64 + minimum: 1 + type: integer + hostAliases: + description: A list of host aliases to include in a pod's /etc/hosts + configuration in the scenario where DNS is not available. This + list takes precedence of the one at the NifiCluster.Spec.PodPolicy + level + items: + description: HostAlias holds the mapping between IP and hostnames + that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + image: + description: Docker image used by the operator to create the node + associated https://hub.docker.com/r/apache/nifi/ + type: string + imagePullPolicy: + description: imagePullPolicy define the pull policy for NiFi cluster + docker image + type: string + imagePullSecrets: + description: imagePullSecrets specifies the secret to use when + using private registry https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#localobjectreference-v1-core + items: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + isNode: + description: Set this to true if the instance is a node in a cluster. + https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#basic-cluster-setup + type: boolean + nodeAffinity: + description: nodeAffinity can be specified, operator populates + this value if new pvc added later to node + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. 
Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. 
If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. 
+ type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: nodeSelector can be specified, which set the pod + to fit on a node https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + type: object + podMetadata: + description: podMetadata allows to add additionnal metadata to + the node pods + properties: + annotations: + additionalProperties: + type: string + description: Additionnal annotation to merge to the resource + associated https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + type: object + labels: + additionalProperties: + type: string + description: Additionnal labels to merge to the resource associated + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + type: object + priorityClassName: + description: priorityClassName can be used to set the priority + class applied to the node + type: string + provenanceStorage: + description: provenanceStorage allow to specify the maximum amount + of data provenance information to store at a time https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#write-ahead-provenance-repository-properties + type: string + resourcesRequirements: + description: resourceRequirements works exactly like Container + resources, the user can specify the limit and the requests through + this 
property https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + runAsUser: + description: RunAsUser define the id of the user to run in the + Nifi image + format: int64 + minimum: 1 + type: integer + serviceAccountName: + description: serviceAccountName specifies the serviceAccount used + for this specific node + type: string + storageConfigs: + description: storageConfigs specifies the node related configs + items: + description: StorageConfig defines the node storage configuration + properties: + mountPath: + description: Path where the volume will be mount into the + main nifi container inside the pod. + type: string + name: + description: Name of the storage config, used to name PV + to reuse into sidecars for example. 
+ pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + pvcSpec: + description: Kubernetes PVC spec + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on + the contents of the specified data source. If the + AnyVolumeDataSource feature gate is enabled, this + field will always have the same contents as the DataSourceRef + field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API + group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the DataSource field and as such if both fields + are non-empty, they must have the same value. 
For + backwards compatibility, both fields (DataSource and + DataSourceRef) will be set to the same value automatically + if one of them is empty and the other is non-empty. + There are two important differences between DataSource + and DataSourceRef: * While DataSource only allows + two specific types of objects, DataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the + status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + required: + - mountPath + - name + - pvcSpec + type: object + type: array + tolerations: + description: tolerations can be specified, which set the pod's + tolerations https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#concepts + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + type: object + nodeConfigGroupId: + description: reference to the nodeConfigGroup that will be set for + nodes that are managed and autoscaled This Id is used to compute + the names of some Kubernetes resources, so it must be a safe value. + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + nodeLabelsSelector: + description: A label selector used to identify & manage Node objects + in the referenced NifiCluster. Any node matching this selector will + be managed by this autoscaler. Even if that node was previously + statically defined. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + readOnlyConfig: + description: the node readOnlyConfig for each node in the node group + properties: + additionalSharedEnvs: + description: AdditionalSharedEnvs define a set of additional env + variables that will shared between all init containers and containers + in the pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + authorizerConfig: + description: Authorizer configuration that will be applied to + the node. + properties: + replaceTemplateConfigMap: + description: 'A replacement authorizers.xml template configuration + that will replace the default template. NOTE: this is a + template as seen in authorizers.go.' + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceTemplateSecretConfig: + description: 'a replacement authorizers.xml template configuration + that will replace the default template and replaceConfigMap. + NOTE: this is a template as seen in authorizers.go.' + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + bootstrapNotificationServicesConfig: + description: BootstrapNotificationServices configuration that + will be applied to the node. + properties: + replaceConfigMap: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. 
+ type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template and + overrideConfigMap + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + bootstrapProperties: + description: BootstrapProperties configuration that will be applied + to the node. + properties: + nifiJvmMemory: + description: JVM memory settings + type: string + overrideConfigMap: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template and + configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template and + configurations. + type: string + overrideSecretConfig: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. 
+ type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + logbackConfig: + description: Logback configuration that will be applied to the + node. + properties: + replaceConfigMap: + description: logback.xml configuration that will replace the + one produced based on template + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: logback.xml configuration that will replace the + one produced based on template and overrideConfigMap + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + maximumEventDrivenThreadCount: + description: MaximumEventDrivenThreadCount define the maximum + number of threads for event driven processors available to the + system. + format: int32 + type: integer + maximumTimerDrivenThreadCount: + description: MaximumTimerDrivenThreadCount define the maximum + number of threads for timer driven processors available to the + system. + format: int32 + type: integer + nifiProperties: + description: NifiProperties configuration that will be applied + to the node. 
+ properties: + authorizer: + description: Indicates which of the configured authorizers + in the authorizers.xml file to use https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration + type: string + needClientAuth: + description: Nifi security client auth + type: boolean + overrideConfigMap: + description: Additionnals nifi.properties configuration that + will override the one produced based on template and configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals nifi.properties configuration that + will override the one produced based on template, configurations + and overrideConfigMap. + type: string + overrideSecretConfig: + description: Additionnals nifi.properties configuration that + will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + webProxyHosts: + description: A comma separated list of allowed HTTP Host header + values to consider when NiFi is running securely and will + be receiving requests to a different host[:port] than it + is bound to. https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties + items: + type: string + type: array + type: object + zookeeperProperties: + description: ZookeeperProperties configuration that will be applied + to the node. 
+ properties: + overrideConfigMap: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template and + configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template and + configurations. + type: string + overrideSecretConfig: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + type: object + replicas: + default: 0 + description: current number of replicas expected for the node config + group + format: int32 + type: integer + upscaleStrategy: + description: The strategy to use when scaling up the nifi cluster + enum: + - graceful + - simple + type: string + required: + - clusterRef + - nodeConfigGroupId + - nodeLabelsSelector + type: object + status: + description: NifiNodeGroupAutoscalerStatus defines the observed state + of NifiNodeGroupAutoscaler + properties: + replicas: + description: the current number of replicas in this cluster + format: int32 + type: integer + selector: + description: label selectors for cluster child pods. 
HPA uses this + to identify pod replicas + type: string + state: + description: The state of this autoscaler + type: string + required: + - replicas + - selector + - state + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 47bbcd500d..bc29d6923a 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -9,6 +9,7 @@ resources: - bases/nifi.konpyutaika.com_nifiparametercontexts.yaml - bases/nifi.konpyutaika.com_nifiregistryclients.yaml +- bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -20,6 +21,7 @@ patchesStrategicMerge: #- patches/webhook_in_nifidataflows.yaml #- patches/webhook_in_nifiparametercontexts.yaml #- patches/webhook_in_nifiregistryclients.yaml +#- patches/webhook_in_nifinodegroupautoscalers.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -30,6 +32,7 @@ patchesStrategicMerge: #- patches/cainjection_in_nifidataflows.yaml #- patches/cainjection_in_nifiparametercontexts.yaml #- patches/cainjection_in_nifiregistryclients.yaml +#- patches/cainjection_in_nifinodegroupautoscalers.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_nifinodegroupautoscalers.yaml b/config/crd/patches/cainjection_in_nifinodegroupautoscalers.yaml new file mode 100644 index 0000000000..41f1956851 --- /dev/null +++ b/config/crd/patches/cainjection_in_nifinodegroupautoscalers.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: nifinodegroupautoscalers.nifi.konpyutaika.com diff --git a/config/crd/patches/webhook_in_nifinodegroupautoscalers.yaml b/config/crd/patches/webhook_in_nifinodegroupautoscalers.yaml new file mode 100644 index 0000000000..b9d6fe0724 --- /dev/null +++ b/config/crd/patches/webhook_in_nifinodegroupautoscalers.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nifinodegroupautoscalers.nifi.konpyutaika.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index d8e0dfe753..210d1ce495 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -13,4 +13,4 @@ kind: Kustomization images: - name: controller newName: ghcr.io/konpyutaika/docker-images/nifikop - newTag: 0.12.0-k8s-1.20 + newTag: 0.13.0-k8s-1.20 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index cf390aa335..560f3e3cbf 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -31,7 +31,7 @@ spec: - /manager args: - --leader-elect - image: ghcr.io/konpyutaika/docker-images/nifikop:v0.12.0-k8s-1.20-release + image: 
ghcr.io/konpyutaika/docker-images/nifikop:v0.13.0-k8s-1.20-release name: nifikop securityContext: allowPrivilegeEscalation: false diff --git a/config/rbac/nifinodegroupautoscaler_editor_role.yaml b/config/rbac/nifinodegroupautoscaler_editor_role.yaml new file mode 100644 index 0000000000..6d35ed71d6 --- /dev/null +++ b/config/rbac/nifinodegroupautoscaler_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit nifinodegroupautoscalers. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nifinodegroupautoscaler-editor-role +rules: +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers/status + verbs: + - get diff --git a/config/rbac/nifinodegroupautoscaler_viewer_role.yaml b/config/rbac/nifinodegroupautoscaler_viewer_role.yaml new file mode 100644 index 0000000000..c50469d32f --- /dev/null +++ b/config/rbac/nifinodegroupautoscaler_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view nifinodegroupautoscalers. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nifinodegroupautoscaler-viewer-role +rules: +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers + verbs: + - get + - list + - watch +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 48ef045fef..fc5dad337c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -168,6 +168,32 @@ rules: - get - patch - update +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers/finalizers + verbs: + - update +- apiGroups: + - nifi.konpyutaika.com + resources: + - nifinodegroupautoscalers/status + verbs: + - get + - patch + - update - apiGroups: - nifi.konpyutaika.com resources: diff --git a/config/samples/keycloak-example/step-1/operator.yaml b/config/samples/keycloak-example/step-1/operator.yaml index d578bb3061..7766d4c295 100644 --- a/config/samples/keycloak-example/step-1/operator.yaml +++ b/config/samples/keycloak-example/step-1/operator.yaml @@ -1,4 +1,4 @@ -# nifikop 0.12.0 +# nifikop 0.13.0 rbacEnable: true namespaces: - nifi diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 6778d97f4a..fb711455f0 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -4,4 +4,5 @@ resources: - nifi_v1alpha1_nifiusergroup.yaml - nifi_v1alpha1_nifidataflow.yaml - nifi_v1alpha1_nifiparametercontext.yaml +- nifi_v1alpha1_nifinodegroupautoscaler.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/nifi_v1alpha1_nificluster.yaml b/config/samples/nifi_v1alpha1_nificluster.yaml index 1e079c7bb3..43d4f77045 100644 --- a/config/samples/nifi_v1alpha1_nificluster.yaml +++ 
b/config/samples/nifi_v1alpha1_nificluster.yaml @@ -34,9 +34,9 @@ spec: # imagePullPolicy: IfNotPresent # command: ["sh", "-c", "cp -vr /nifi_lib/* /nifi_lib_volume/"] # volumeMounts: - # - name: extensions-repository + # - name: nifi-data-extensions-repository # mountPath: /nifi_lib_volume - # - name: logs + # - name: nifi-data-logs # mountPath: /logs # clusterImage can specify the whole nificluster image in one place clusterImage: "apache/nifi:1.12.1" diff --git a/config/samples/nifi_v1alpha1_nifinodegroupautoscaler.yaml b/config/samples/nifi_v1alpha1_nifinodegroupautoscaler.yaml new file mode 100644 index 0000000000..e430232681 --- /dev/null +++ b/config/samples/nifi_v1alpha1_nifinodegroupautoscaler.yaml @@ -0,0 +1,32 @@ +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiNodeGroupAutoscaler +metadata: + name: nifinodegroupautoscaler-sample +spec: + # contains the reference to the NifiCluster with the one the node group autoscaler is linked. + clusterRef: + name: nificluster-name + namespace: nifikop + # defines the id of the NodeConfig contained in NifiCluster.Spec.NodeConfigGroups + nodeConfigGroupId: default-node-group + # readOnlyConfig can be used to pass Nifi node config + # which has type read-only these config changes will trigger rolling upgrade + readOnlyConfig: + nifiProperties: + overrideConfigs: | + nifi.ui.banner.text=NiFiKop - Scale Group + # This is an example of a node config you can apply to each replica in this node group. 
+ # Any settings here will override those in the configured nodeConfigGroupId + nodeConfig: + nodeSelector: + node_type: high-mem + # The selector used to identify nodes in NifiCluster.Spec.Nodes this autoscaler will manage + # Use Node.Labels in combination with this selector to clearly define which nodes will be managed by this autoscaler + nodeLabelsSelector: + matchLabels: + nifi_cr: nificluster-name + nifi_node_group: default-node-group + # the strategy used to decide how to add nodes to a nifi cluster + upscaleStrategy: simple + # the strategy used to decide how to remove nodes from an existing cluster + downscaleStrategy: lifo \ No newline at end of file diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index 75e1eba47a..dab2f3f388 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -449,7 +449,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return RequeueWithError(r.Log, "failed to update NifiDataflow "+current.Name, err) } - r.Log.Info("Ensured Dataflow") + r.Log.Debug("Ensured Dataflow") r.Recorder.Event(instance, corev1.EventTypeWarning, "Reconciled", fmt.Sprintf("Success fully ensured dataflow %s based on flow {bucketId : %s, flowId: %s, version: %s}", diff --git a/controllers/nifinodegroupautoscaler_controller.go b/controllers/nifinodegroupautoscaler_controller.go new file mode 100644 index 0000000000..883bb2f4fb --- /dev/null +++ b/controllers/nifinodegroupautoscaler_controller.go @@ -0,0 +1,335 @@ +/* +Copyright 2020. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "go.uber.org/zap" + + "emperror.dev/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/autoscale" + "github.com/konpyutaika/nifikop/pkg/k8sutil" + "github.com/konpyutaika/nifikop/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var autoscalerFinalizer = "nifinodegroupautoscalers.nifi.konpyutaika.com/finalizer" + +// NifiNodeGroupAutoscalerReconciler reconciles a NifiNodeGroupAutoscaler object +type NifiNodeGroupAutoscalerReconciler struct { + runtimeClient.Client + APIReader runtimeClient.Reader + Scheme *runtime.Scheme + Log zap.Logger + Recorder record.EventRecorder + RequeueInterval int + RequeueOffset int +} + +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch +//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nifinodegroupautoscalers,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nifinodegroupautoscalers/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nifinodegroupautoscalers/finalizers,verbs=update 
+//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nificlusters,verbs=get;list;watch;update;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the NifiNodeGroupAutoscaler object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile +func (r *NifiNodeGroupAutoscalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + // @TODO: Manage dead lock when pending node because not enough resources + // by implementing a brut force deletion on nificluster controller. + nodeGroupAutoscaler := &v1alpha1.NifiNodeGroupAutoscaler{} + err := r.Client.Get(ctx, req.NamespacedName, nodeGroupAutoscaler) + + if err != nil { + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. + // Return and don't requeue + return Reconciled() + } + // Error reading the object - requeue the request. 
+ return RequeueWithError(r.Log, err.Error(), err) + } + + // Check if marked for deletion and run finalizers + if k8sutil.IsMarkedForDeletion(nodeGroupAutoscaler.ObjectMeta) { + return r.checkFinalizers(ctx, nodeGroupAutoscaler) + } + + // Ensure finalizer for cleanup on deletion + if !util.StringSliceContains(nodeGroupAutoscaler.GetFinalizers(), autoscalerFinalizer) { + r.Log.Info(fmt.Sprintf("Adding Finalizer for NifiNodeGroupAutoscaler node group %s", nodeGroupAutoscaler.Spec.NodeConfigGroupId)) + nodeGroupAutoscaler.SetFinalizers(append(nodeGroupAutoscaler.GetFinalizers(), autoscalerFinalizer)) + } + + // lookup NifiCluster reference + // we do not want cached objects here. We want an accurate state of what the cluster is right now, so bypass the client cache by using the APIReader directly. + cluster := &v1alpha1.NifiCluster{} + err = r.APIReader.Get(ctx, + types.NamespacedName{ + Name: nodeGroupAutoscaler.Spec.ClusterRef.Name, + Namespace: nodeGroupAutoscaler.Spec.ClusterRef.Namespace, + }, + cluster) + if err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("failed to look up cluster reference %v+", nodeGroupAutoscaler.Spec.ClusterRef), err) + } + + // Determine how many replicas there currently are and how many are desired for the appropriate node group + numDesiredReplicas := nodeGroupAutoscaler.Spec.Replicas + currentReplicas, err := r.getManagedNodes(nodeGroupAutoscaler, cluster.Spec.Nodes) + if err != nil { + return RequeueWithError(r.Log, "Failed to apply autoscaler node selector to cluster nodes", err) + } + numCurrentReplicas := int32(len(currentReplicas)) + + // if the current number of nodes being managed by this autoscaler is different than the replica setting, + // then set the autoscaler status to out of sync to indicate we're changing the NifiCluster node config + // Additionally, if the autoscaler state is currently out of sync then scale up/down + if numDesiredReplicas != numCurrentReplicas || nodeGroupAutoscaler.Status.State == 
v1alpha1.AutoscalerStateOutOfSync { + r.Log.Info(fmt.Sprintf("Replicas changed from %d to %d", numCurrentReplicas, numDesiredReplicas)) + if err = r.updateAutoscalerReplicaState(ctx, nodeGroupAutoscaler, v1alpha1.AutoscalerStateOutOfSync); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("Failed to update node group autoscaler state for node group %s", nodeGroupAutoscaler.Spec.NodeConfigGroupId), err) + } + + // json merge patch is a full-replace strategy. This means we must compute the entire NifiCluster.Spec.Nodes list as it should look after scaling. + // The optimistic lock here ensures that we only patch the latest version of the NifiCluster to avoid stomping on changes any other process makes. + // Ideally, we could use a strategic merge, but it's not supported for CRDs: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + clusterPatch := runtimeClient.MergeFromWithOptions(cluster.DeepCopy(), runtimeClient.MergeFromWithOptimisticLock{}) + + if numDesiredReplicas > numCurrentReplicas { + // need to increase node group + numNodesToAdd := numDesiredReplicas - numCurrentReplicas + r.Log.Info(fmt.Sprintf("Adding %d more nodes to cluster %s spec.nodes configuration for node group %s", numNodesToAdd, cluster.Name, nodeGroupAutoscaler.Spec.NodeConfigGroupId)) + + if err = r.scaleUp(nodeGroupAutoscaler, cluster, numNodesToAdd); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("Failed to scale cluster %s up for node group %s", cluster.Name, nodeGroupAutoscaler.Spec.NodeConfigGroupId), err) + } + + } else if numDesiredReplicas < numCurrentReplicas { + // need to decrease node group + numNodesToRemove := numCurrentReplicas - numDesiredReplicas + r.Log.Info(fmt.Sprintf("Removing %d nodes from cluster %s spec.nodes configuration for node group %s", numNodesToRemove, cluster.Name, nodeGroupAutoscaler.Spec.NodeConfigGroupId)) + + if err = r.scaleDown(nodeGroupAutoscaler, cluster, 
numNodesToRemove); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("Failed to scale cluster %s down for node group %s", cluster.Name, nodeGroupAutoscaler.Spec.NodeConfigGroupId), err) + } + } + + // patch nificluster resource with added/removed nodes + if err = r.Client.Patch(ctx, cluster, clusterPatch); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("Failed to patch nifi cluster with changes in nodes. Tried to apply the following patch:\n %v+", clusterPatch), err) + } + + // update autoscaler state to InSync. + if err = r.updateAutoscalerReplicaState(ctx, nodeGroupAutoscaler, v1alpha1.AutoscalerStateInSync); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("Failed to update node group autoscaler state for node group %s", nodeGroupAutoscaler.Spec.NodeConfigGroupId), err) + } + } else { + r.Log.Info("Cluster replicas config and current number of replicas are the same", zap.Int32("replicas", nodeGroupAutoscaler.Spec.Replicas)) + } + + // update replica and replica status + if err = r.updateAutoscalerReplicaStatus(ctx, cluster, nodeGroupAutoscaler); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("Failed to update node group autoscaler replica status for node group %s", nodeGroupAutoscaler.Spec.NodeConfigGroupId), err) + } + + return reconcile.Result{ + RequeueAfter: util.GetRequeueInterval(r.RequeueInterval, r.RequeueOffset), + }, nil +} + +// scaleUp updates the provided cluster.Spec.Nodes list with the appropriate numNodesToAdd according to the autoscaler.Spec.UpscaleStrategy +func (r *NifiNodeGroupAutoscalerReconciler) scaleUp(autoscaler *v1alpha1.NifiNodeGroupAutoscaler, cluster *v1alpha1.NifiCluster, numNodesToAdd int32) error { + switch autoscaler.Spec.UpscaleStrategy { + // Right now Simple is the only option and the default + case v1alpha1.SimpleClusterUpscaleStrategy: + fallthrough + default: + r.Log.Info(fmt.Sprintf("Using Simple upscale strategy for cluster %s node group %s", cluster.Name, 
autoscaler.Spec.NodeConfigGroupId)) + simple := &autoscale.SimpleHorizontalUpscaleStrategy{ + NifiCluster: cluster, + NifiNodeGroupAutoscaler: autoscaler, + } + nodesToAdd, err := simple.ScaleUp(numNodesToAdd) + if err != nil { + return errors.WrapIf(err, "Failed to scale up using the Simple strategy.") + } + cluster.Spec.Nodes = append(cluster.Spec.Nodes, nodesToAdd...) + } + r.Recorder.Eventf(autoscaler, corev1.EventTypeNormal, "Upscaling", + "Adding %d more nodes to cluster %s spec.nodes configuration for node group %s", numNodesToAdd, cluster.Name, autoscaler.Spec.NodeConfigGroupId) + + return nil +} + +// scaleDown updates the provided cluster.Spec.Nodes list with the appropriate numNodesToRemove according to the autoscaler.Spec.DownscaleStrategy +func (r *NifiNodeGroupAutoscalerReconciler) scaleDown(autoscaler *v1alpha1.NifiNodeGroupAutoscaler, cluster *v1alpha1.NifiCluster, numNodesToRemove int32) error { + switch autoscaler.Spec.DownscaleStrategy { + + // Right now LIFO is the only option and the default + case v1alpha1.LIFOClusterDownscaleStrategy: + fallthrough + default: + r.Log.Info(fmt.Sprintf("Using LIFO downscale strategy for cluster %s node group %s", cluster.Name, autoscaler.Spec.NodeConfigGroupId)) + // remove the last n nodes from the node list + lifo := &autoscale.LIFOHorizontalDownscaleStrategy{ + NifiCluster: cluster, + NifiNodeGroupAutoscaler: autoscaler, + } + nodesToRemove, err := lifo.ScaleDown(numNodesToRemove) + if err != nil { + return errors.WrapIf(err, "Failed to scale cluster down via LIFO strategy.") + } + // remove the computed set of nodes from the cluster + cluster.Spec.Nodes = util.SubtractNodes(cluster.Spec.Nodes, nodesToRemove) + + r.Recorder.Eventf(autoscaler, corev1.EventTypeNormal, "Downscaling", + "Using LIFO downscale strategy for cluster %s node group %s", cluster.Name, autoscaler.Spec.NodeConfigGroupId) + } + + return nil +} + +// updateAutoscalerReplicaState updates the state of the autoscaler +func (r 
*NifiNodeGroupAutoscalerReconciler) updateAutoscalerReplicaState(ctx context.Context, autoscaler *v1alpha1.NifiNodeGroupAutoscaler, state v1alpha1.NodeGroupAutoscalerState) error { + autoscaler.Status.State = state + switch state { + case v1alpha1.AutoscalerStateInSync: + r.Recorder.Event(autoscaler, corev1.EventTypeNormal, "Synchronized", "Successfully synchronized node group autoscaler.") + case v1alpha1.AutoscalerStateOutOfSync: + r.Recorder.Event(autoscaler, corev1.EventTypeNormal, "Synchronizing", "The number of replicas for this node group has changed. Synchronizing.") + } + return r.Client.Status().Update(ctx, autoscaler) +} + +// updateAutoscalerReplicaStatus updates autoscaler replica status to inform the k8s scale subresource +func (r *NifiNodeGroupAutoscalerReconciler) updateAutoscalerReplicaStatus(ctx context.Context, nifiCluster *v1alpha1.NifiCluster, autoscaler *v1alpha1.NifiNodeGroupAutoscaler) error { + podList, err := r.getCurrentReplicaPods(ctx, autoscaler) + if err != nil { + return err + } + + replicas := v1alpha1.ClusterReplicas(int32(len(podList.Items))) + selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ + MatchLabels: autoscaler.Spec.NodeLabelsSelector.MatchLabels, + }) + if err != nil { + return errors.WrapIf(err, "Failed to get label selector to update CR") + } + + replicaSelector := v1alpha1.ClusterReplicaSelector(selector.String()) + autoscaler.Status.Replicas = replicas + autoscaler.Status.Selector = replicaSelector + + return r.Client.Status().Update(ctx, autoscaler) +} + +// getCurrentReplicaPods searches for any pods created in this node scaler's node group +func (r *NifiNodeGroupAutoscalerReconciler) getCurrentReplicaPods(ctx context.Context, autoscaler *v1alpha1.NifiNodeGroupAutoscaler) (*corev1.PodList, error) { + podList := &corev1.PodList{} + replicaLabels, err := autoscaler.Spec.NifiNodeGroupSelectorAsMap() + if err != nil { + return nil, err + } + // find replica pods for this autoscaler + labelsToMatch := 
[]map[string]string{ + replicaLabels, + } + matchingLabels := runtimeClient.MatchingLabels(util.MergeLabels(labelsToMatch...)) + + err = r.Client.List(ctx, podList, + runtimeClient.ListOption(runtimeClient.InNamespace(autoscaler.Namespace)), runtimeClient.ListOption(matchingLabels)) + if err != nil { + return nil, errors.WrapIf(err, fmt.Sprintf("failed to query for replica podList for node group %s", autoscaler.Spec.NodeConfigGroupId)) + } + return podList, nil +} + +// getManagedNodes filters a set of nodes by an autoscaler's configured node selector +func (r *NifiNodeGroupAutoscalerReconciler) getManagedNodes(autoscaler *v1alpha1.NifiNodeGroupAutoscaler, nodes []v1alpha1.Node) (managedNodes []v1alpha1.Node, err error) { + selector, err := metav1.LabelSelectorAsSelector(autoscaler.Spec.NodeLabelsSelector) + if err != nil { + return nil, err + } + + for _, node := range nodes { + if selector.Matches(labels.Set(node.Labels)) { + managedNodes = append(managedNodes, node) + } + } + return managedNodes, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *NifiNodeGroupAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.NifiNodeGroupAutoscaler{}). + Complete(r) +} + +func (r *NifiNodeGroupAutoscalerReconciler) checkFinalizers(ctx context.Context, autoscaler *v1alpha1.NifiNodeGroupAutoscaler) (reconcile.Result, error) { + r.Log.Info("NifiNodeGroupAutoscaler is marked for deletion") + + var err error + if util.StringSliceContains(autoscaler.GetFinalizers(), autoscalerFinalizer) { + // no further actions necessary prior to removing finalizer. 
+ if err = r.removeFinalizer(ctx, autoscaler); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer from autoscaler", err) + } + } + + return Reconciled() +} + +func (r *NifiNodeGroupAutoscalerReconciler) removeFinalizer(ctx context.Context, autoscaler *v1alpha1.NifiNodeGroupAutoscaler) error { + autoscaler.SetFinalizers(util.StringSliceRemove(autoscaler.GetFinalizers(), autoscalerFinalizer)) + _, err := r.updateAndFetchLatest(ctx, autoscaler) + return err +} + +func (r *NifiNodeGroupAutoscalerReconciler) updateAndFetchLatest(ctx context.Context, + autoscaler *v1alpha1.NifiNodeGroupAutoscaler) (*v1alpha1.NifiNodeGroupAutoscaler, error) { + + typeMeta := autoscaler.TypeMeta + err := r.Client.Update(ctx, autoscaler) + if err != nil { + return nil, err + } + autoscaler.TypeMeta = typeMeta + return autoscaler, nil +} diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index b7f0d67c24..fc698c985b 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -195,6 +195,7 @@ func (r *NifiUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c case errorfactory.ResourceNotReady: r.Log.Debug("generated secret not found, may not be ready", zap.String("user", instance.Name)) + return ctrl.Result{ Requeue: true, RequeueAfter: interval / 3, diff --git a/controllers/nifiusergroup_controller.go b/controllers/nifiusergroup_controller.go index 7e4efb67b6..a3098c1a72 100644 --- a/controllers/nifiusergroup_controller.go +++ b/controllers/nifiusergroup_controller.go @@ -304,6 +304,7 @@ func (r *NifiUserGroupReconciler) Reconcile(ctx context.Context, req ctrl.Reques r.Log.Debug("Ensured User Group", zap.String("userGroup", instance.Name)) + return RequeueAfter(interval) } diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 72db3073ba..9212ae326a 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -31,7 +31,7 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/log/zap" nifiv1alpha1 "github.com/konpyutaika/nifikop/api/v1alpha1" - // +kubebuilder:scaffold:imports + //+kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -54,7 +54,8 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, } var err error @@ -65,25 +66,7 @@ var _ = BeforeSuite(func() { err = nifiv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = nifiv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = nifiv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = nifiv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = nifiv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = nifiv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = nifiv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme + //+kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) diff --git a/go.mod b/go.mod index 62a5d5e087..aeda5f6041 100644 --- a/go.mod +++ b/go.mod @@ -79,6 +79,7 @@ require ( gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 807adebeff..c359ad2c75 100644 --- a/go.sum +++ b/go.sum @@ -449,6 +449,8 @@ github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -497,7 +499,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= @@ -571,6 +572,7 @@ github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uY github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1126,8 +1128,9 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/helm/nifikop/Chart.yaml b/helm/nifikop/Chart.yaml index b0d3e2d856..25694bbaa6 100644 --- a/helm/nifikop/Chart.yaml +++ b/helm/nifikop/Chart.yaml @@ -4,8 +4,8 @@ name: nifikop home: https://github.com/konpyutaika/nifikop sources: - https://github.com/konpyutaika/nifikop -version: 0.12.0 -appVersion: 0.12.0-k8s-1.20-release +version: 0.13.0 +appVersion: 0.13.0-k8s-1.20-release icon: maintainers: - name: erdrix diff --git a/helm/nifikop/README.md b/helm/nifikop/README.md index 70bd807042..5dae6f582d 100644 --- 
a/helm/nifikop/README.md +++ b/helm/nifikop/README.md @@ -23,7 +23,7 @@ The following tables lists the configurable parameters of the NiFi Operator Helm | Parameter | Description | Default | | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |--------------------------| | `image.repository` | Image | `konpyutaika/nifikop` | -| `image.tag` | Image tag | `v0.12.0-k8s-1.20-release` | +| `image.tag` | Image tag | `v0.13.0-k8s-1.20-release` | | `image.pullPolicy` | Image pull policy | `Always` | | `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | | `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | diff --git a/helm/nifikop/crds/nifi.konpyutaika.com_nificlusters.yaml b/helm/nifikop/crds/nifi.konpyutaika.com_nificlusters.yaml index 592580fdad..cabf310a6b 100644 --- a/helm/nifikop/crds/nifi.konpyutaika.com_nificlusters.yaml +++ b/helm/nifikop/crds/nifi.konpyutaika.com_nificlusters.yaml @@ -3788,6 +3788,13 @@ spec: description: Unique Node id format: int32 type: integer + labels: + additionalProperties: + type: string + description: Labels are used to distinguish nodes from one another. + They are also used by NifiNodeGroupAutoscaler to be automatically + scaled. See NifiNodeGroupAutoscaler.Spec.NodeLabelsSelector + type: object nodeConfig: description: node configuration properties: @@ -8436,6 +8443,11 @@ spec: configurationState: description: ConfigurationState holds info about the config type: string + creationTime: + description: CreationTime is the time at which this node was + created. This must be sortable. 
+ format: date-time + type: string gracefulActionState: description: GracefulActionState holds info about nifi cluster action status @@ -8464,6 +8476,11 @@ spec: description: InitClusterNode contains if this nodes was part of the initial cluster type: boolean + lastUpdatedTime: + description: LastUpdatedTime is the last time at which this + node was updated. This must be sortable. + format: date-time + type: string podIsReady: description: PodIsReady whether or not the associated pod is ready diff --git a/helm/nifikop/crds/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml b/helm/nifikop/crds/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml new file mode 100644 index 0000000000..165582e003 --- /dev/null +++ b/helm/nifikop/crds/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml @@ -0,0 +1,2713 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: nifinodegroupautoscalers.nifi.konpyutaika.com +spec: + group: nifi.konpyutaika.com + names: + kind: NifiNodeGroupAutoscaler + listKind: NifiNodeGroupAutoscalerList + plural: nifinodegroupautoscalers + singular: nifinodegroupautoscaler + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NifiNodeGroupAutoscaler is the Schema for the nifinodegroupautoscalers + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NifiNodeGroupAutoscalerSpec defines the desired state of + NifiNodeGroupAutoscaler + properties: + clusterRef: + description: contains the reference to the NifiCluster with the one + the dataflow is linked. + properties: + name: + type: string + namespace: + type: string + required: + - name + type: object + downscaleStrategy: + description: The strategy to use when scaling down the nifi cluster + enum: + - lifo + - nonprimary + - leastbusy + type: string + nodeConfig: + description: the nodeConfig to use for each node in the node group. + This will be merged with and is preferred to the configured nodeConfigGroupId + properties: + externalVolumeConfigs: + description: externalVolumeConfigs specifies a list of volume + to mount into the main container. + items: + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk + resource that is attached to a kubelet''s host machine + and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property + empty).' 
+ format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the + default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read + Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob + disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: the name of secret that contains Azure + Storage Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of + Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key + ring for User, default is /etc/ceph/user.secret More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the + authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, + default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached + and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume + in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value + between 0000 and 0777 or a decimal value between 0 + and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in + the Data field of the referenced ConfigMap will be + projected into the volume as a file whose name is + the key and content is the value. If specified, the + listed keys will be projected into the specified paths, + and unlisted keys will not be present. 
If a key is + specified which is not present in the ConfigMap, the + volume setup will error unless it is marked optional. + Paths must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set + permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict with + other options that affect the file mode, like + fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: The relative path of the file to + map the key to. May not be an absolute path. + May not contain the path element '..'. May not + start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys + must be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: Driver is the name of the CSI driver that + handles this volume. Consult with your admin for the + correct name as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed + to the associated CSI driver which will determine + the default filesystem to apply. 
+ type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to + the secret object containing sensitive information + to pass to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the + secret object contains more than one secret, all secret + references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for + the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific + properties that are passed to the CSI driver. Consult + your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value + between 0000 and 0777 or a decimal value between 0 + and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.'
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used to set + permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict with + other options that affect the file mode, like + fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory + that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back + this directory. The default is "" which means to use + the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required + for this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all + containers in a pod. The default is nil which means + that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "Ephemeral represents a volume that is handled + by a cluster storage driver. The volume's lifecycle is + tied to the pod that defines it - it will be created before + the pod starts, and deleted when the pod is removed. 
\n + Use this if: a) the volume is only needed while the pod + runs, b) features of normal volumes like restoring from + snapshot or capacity tracking are needed, c) the storage + driver is specified through a storage class, and d) the + storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for + more information on the connection between this volume + type and PersistentVolumeClaim). \n Use PersistentVolumeClaim + or one of the vendor-specific APIs for volumes that persist + for longer than the lifecycle of an individual pod. \n + Use CSI for light-weight local ephemeral volumes if the + CSI driver is meant to be used that way - see the documentation + of the driver for more information. \n A pod can use both + types of ephemeral volumes and persistent volumes at the + same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC + to provision the volume. The pod in which this EphemeralVolumeSource + is embedded will be the owner of the PVC, i.e. the + PVC will be deleted together with the pod. The name + of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` + array entry. Pod validation will reject the pod if + the concatenated name is not valid for a PVC (for + example, too long). \n An existing PVC with that name + that is not owned by the pod will *not* be used for + the pod to avoid using an unrelated volume by mistake. + Starting the pod is then blocked until the unrelated + PVC is removed. If such a pre-created PVC is meant + to be used by the pod, the PVC has to updated with + an owner reference to the pod once the pod exists. + Normally this should not be necessary, but it may + be useful when manually reconstructing a broken cluster. + \n This field is read-only and no changes will be + made by Kubernetes to the PVC after it has been created. + \n Required, must not be nil." 
+ properties: + metadata: + description: May contain labels and annotations + that will be copied into the PVC when creating + it. No other fields are allowed and will be rejected + during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + The entire content is copied unchanged into the + PVC that gets created from this template. The + same fields as in a PersistentVolumeClaim are + also valid here. + properties: + accessModes: + description: 'AccessModes contains the desired + access modes the volume should have. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify + either: * An existing VolumeSnapshot object + (snapshot.storage.k8s.io/VolumeSnapshot) * + An existing PVC (PersistentVolumeClaim) If + the provisioner or an external controller + can support the specified data source, it + will create a new volume based on the contents + of the specified data source. If the AnyVolumeDataSource + feature gate is enabled, this field will always + have the same contents as the DataSourceRef + field.' + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. If APIGroup + is not specified, the specified Kind must + be in the core API group. For any other + third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which + to populate the volume with data, if a non-empty + volume is desired. This may be any local object + from a non-empty API group (non core object) + or a PersistentVolumeClaim object. 
When this + field is specified, volume binding will only + succeed if the type of the specified object + matches some installed volume populator or + dynamic provisioner. This field will replace + the functionality of the DataSource field + and as such if both fields are non-empty, + they must have the same value. For backwards + compatibility, both fields (DataSource and + DataSourceRef) will be set to the same value + automatically if one of them is empty and + the other is non-empty. There are two important + differences between DataSource and DataSourceRef: + * While DataSource only allows two specific + types of objects, DataSourceRef allows any + non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed + values (dropping them), DataSourceRef preserves + all values, and generates an error if a disallowed + value is specified. (Alpha) Using this field + requires the AnyVolumeDataSource feature gate + to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. If APIGroup + is not specified, the specified Kind must + be in the core API group. For any other + third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum + resources the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify + resource requirements that are lower than + previous value but must still be higher than + capacity recorded in the status field of the + claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum + amount of compute resources allowed. More + info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum + amount of compute resources required. + If Requests is omitted for a container, + it defaults to Limits if that is explicitly + specified, otherwise to an implementation-defined + value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: A label selector requirement + is a selector that contains values, + a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: operator represents a + key's relationship to a set of values. + Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of + string values. If the operator is + In or NotIn, the values array must + be non-empty. If the operator is + Exists or DoesNotExist, the values + array must be empty. 
This array + is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator + is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required + by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of + volume is required by the claim. Value of + Filesystem is implied when not included in + claim spec. + type: string + volumeName: + description: VolumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: FC represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. TODO: how do we prevent errors in the + filesystem from compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs + and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use + for this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the + secret object containing sensitive information to + pass to the plugin scripts. This may be empty if no + secret object is specified. If the secret object contains + more than one secret, all secrets are passed to the + plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached + to a kubelet's host machine. 
This depends on the Flocker + control service being running + properties: + datasetName: + description: Name of the dataset stored as metadata + -> name on the dataset for Flocker should be considered + as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then + exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify + the partition as "1". Similarly, the volume partition + for /dev/sda is "0" (or you can leave the property + empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. + Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. 
More info: + https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an + InitContainer that clones the repo using git, then mount + the EmptyDir into the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain + or start with '..'. If '.' is supplied, the volume + directory will be the git repository. Otherwise, + if specified, the volume will contain the git repository + in the subdirectory with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on + the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More + info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs + volume to be mounted with read-only permissions. Defaults + to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or + directory on the host machine that is directly exposed + to the container. 
This is generally used for system agents + or other privileged things that are allowed to see the + host machine. Most containers will NOT need this. More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host + directory mounts and who can/can not mount host directories + as read/write.' + properties: + path: + description: 'Path of the directory on the host. If + the path is a symlink, it will follow the link to + the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that + is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new + iSCSI interface : will + be created for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI + transport. 
Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is + either an IP or ip_addr:port if the port is other + than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + nfs: + description: 'NFS represents an NFS mount on the host that + shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export + to be mounted with read-only permissions. Defaults + to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of + the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to + mount Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits used to set permissions on created + files by default. Must be an octal value between 0000 + and 0777 or a decimal value between 0 and 511. YAML + accepts both octal and decimal values, JSON requires + decimal values for mode bits. Directories within the + path are not affected by this setting. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value + pair in the Data field of the referenced + ConfigMap will be projected into the volume + as a file whose name is the key and content + is the value. If specified, the listed keys + will be projected into the specified paths, + and unlisted keys will not be present. If + a key is specified which is not present + in the ConfigMap, the volume setup will + error unless it is marked optional. Paths + must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used + to set permissions on this file. Must + be an octal value between 0000 and + 0777 or a decimal value between 0 + and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. 
If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, + like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the + file to map the key to. May not be + an absolute path. May not contain + the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits used + to set permissions on this file, must + be an octal value between 0000 and + 0777 or a decimal value between 0 + and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, + like fsGroup, and the result can be + other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of + the container: only resources limits + and requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data + to project + properties: + items: + description: If unspecified, each key-value + pair in the Data field of the referenced + Secret will be projected into the volume + as a file whose name is the key and content + is the value. If specified, the listed keys + will be projected into the specified paths, + and unlisted keys will not be present. If + a key is specified which is not present + in the Secret, the volume setup will error + unless it is marked optional. Paths must + be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used + to set permissions on this file. 
Must + be an octal value between 0000 and + 0777 or a decimal value between 0 + and 511. YAML accepts both octal and + decimal values, JSON requires decimal + values for mode bits. If not specified, + the volume defaultMode will be used. + This might be in conflict with other + options that affect the file mode, + like fsGroup, and the result can be + other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the + file to map the key to. May not be + an absolute path. May not contain + the path element '..'. May not start + with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must + identify itself with an identifier specified + in the audience of the token, and otherwise + should reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, + the kubelet volume plugin will proactively + rotate the service account token. The kubelet + will start trying to rotate the token if + the token is older than 80 percent of its + time to live or if the token is older than + 24 hours.Defaults to 1 hour and must be + at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to + the mount point of the file to project the + token into. 
+ type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is + no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults + to false. + type: boolean + registry: + description: Registry represents a single or multiple + Quobyte Registry services specified as a string as + host:port pair (multiple entries are separated with + commas) which acts as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume + in the Backend Used with dynamically provisioned Quobyte + volumes, value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to + serivceaccount user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount + on the host that shares a pod''s lifetime. More info: + https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you + want to mount. Tip: Ensure that the filesystem type + is supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly + setting in VolumeMounts. Defaults to false. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. + Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain + for the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). 
ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for + ScaleIO user and other sensitive information. If this + is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with + the protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in + the ScaleIO system that is associated with this volume + source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits used to set permissions + on created files by default. Must be an octal value + between 0000 and 0777 or a decimal value between 0 + and 511. YAML accepts both octal and decimal values, + JSON requires decimal values for mode bits. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in + the Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the Secret, the volume setup will + error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start + with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits used to set + permissions on this file. Must be an octal value + between 0000 and 0777 or a decimal value between + 0 and 511. YAML accepts both octal and decimal + values, JSON requires decimal values for mode + bits. If not specified, the volume defaultMode + will be used. This might be in conflict with + other options that affect the file mode, like + fsGroup, and the result can be other mode bits + set.' + format: int32 + type: integer + path: + description: The relative path of the file to + map the key to. May not be an absolute path. + May not contain the path element '..'. May not + start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys + must be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". 
Implicitly inferred to be "ext4" if + unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for + obtaining the StorageOS API credentials. If not specified, + default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of + the StorageOS volume. Volume names are only unique + within a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of + the volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows + the Kubernetes name scoping to be mirrored within + StorageOS for tighter integration. Set VolumeName + to any name to override the default behaviour. Set + to "default" if you are not using namespaces within + StorageOS. Namespaces that do not pre-exist within + StorageOS will be created. + type: string + type: object + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if + unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) + profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) + profile name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - mountPath + - name + type: object + type: array + fsGroup: + description: FSGroup define the id of the group for each volumes + in Nifi image + format: int64 + minimum: 1 + type: integer + hostAliases: + description: A list of host aliases to include in a pod's /etc/hosts + configuration in the scenario where DNS is not available. This + list takes precedence of the one at the NifiCluster.Spec.PodPolicy + level + items: + description: HostAlias holds the mapping between IP and hostnames + that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + image: + description: Docker image used by the operator to create the node + associated https://hub.docker.com/r/apache/nifi/ + type: string + imagePullPolicy: + description: imagePullPolicy define the pull policy for NiFi cluster + docker image + type: string + imagePullSecrets: + description: imagePullSecrets specifies the secret to use when + using private registry https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#localobjectreference-v1-core + items: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + isNode: + description: Set this to true if the instance is a node in a cluster. + https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#basic-cluster-setup + type: boolean + nodeAffinity: + description: nodeAffinity can be specified, operator populates + this value if new pvc added later to node + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. 
Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. 
If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. 
+ type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: nodeSelector can be specified, which set the pod + to fit on a node https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + type: object + podMetadata: + description: podMetadata allows to add additionnal metadata to + the node pods + properties: + annotations: + additionalProperties: + type: string + description: Additionnal annotation to merge to the resource + associated https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set + type: object + labels: + additionalProperties: + type: string + description: Additionnal labels to merge to the resource associated + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set + type: object + type: object + priorityClassName: + description: priorityClassName can be used to set the priority + class applied to the node + type: string + provenanceStorage: + description: provenanceStorage allow to specify the maximum amount + of data provenance information to store at a time https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#write-ahead-provenance-repository-properties + type: string + resourcesRequirements: + description: resourceRequirements works exactly like Container + resources, the user can specify the limit and the requests through + this 
property https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + runAsUser: + description: RunAsUser define the id of the user to run in the + Nifi image + format: int64 + minimum: 1 + type: integer + serviceAccountName: + description: serviceAccountName specifies the serviceAccount used + for this specific node + type: string + storageConfigs: + description: storageConfigs specifies the node related configs + items: + description: StorageConfig defines the node storage configuration + properties: + mountPath: + description: Path where the volume will be mount into the + main nifi container inside the pod. + type: string + name: + description: Name of the storage config, used to name PV + to reuse into sidecars for example. 
+ pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + pvcSpec: + description: Kubernetes PVC spec + properties: + accessModes: + description: 'AccessModes contains the desired access + modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) If the provisioner + or an external controller can support the specified + data source, it will create a new volume based on + the contents of the specified data source. If the + AnyVolumeDataSource feature gate is enabled, this + field will always have the same contents as the DataSourceRef + field.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + dataSourceRef: + description: 'Specifies the object from which to populate + the volume with data, if a non-empty volume is desired. + This may be any local object from a non-empty API + group (non core object) or a PersistentVolumeClaim + object. When this field is specified, volume binding + will only succeed if the type of the specified object + matches some installed volume populator or dynamic + provisioner. This field will replace the functionality + of the DataSource field and as such if both fields + are non-empty, they must have the same value. 
For + backwards compatibility, both fields (DataSource and + DataSourceRef) will be set to the same value automatically + if one of them is empty and the other is non-empty. + There are two important differences between DataSource + and DataSourceRef: * While DataSource only allows + two specific types of objects, DataSourceRef allows + any non-core object, as well as PersistentVolumeClaim + objects. * While DataSource ignores disallowed values + (dropping them), DataSourceRef preserves all values, + and generates an error if a disallowed value is specified. + (Alpha) Using this field requires the AnyVolumeDataSource + feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource + being referenced. If APIGroup is not specified, + the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources + the volume should have. If RecoverVolumeExpansionFailure + feature is enabled users are allowed to specify resource + requirements that are lower than previous value but + must still be higher than capacity recorded in the + status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount + of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount + of compute resources required. If Requests is + omitted for a container, it defaults to Limits + if that is explicitly specified, otherwise to + an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: A label query over volumes to consider + for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the + claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume + is required by the claim. Value of Filesystem is implied + when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to + the PersistentVolume backing this claim. + type: string + type: object + required: + - mountPath + - name + - pvcSpec + type: object + type: array + tolerations: + description: tolerations can be specified, which set the pod's + tolerations https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#concepts + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using + the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, allowed + values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to + the value. Valid operators are Exists and Equal. Defaults + to Equal. Exists is equivalent to wildcard for value, + so that a pod can tolerate all taints of a particular + category. 
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of + time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the taint + forever (do not evict). Zero and negative values will + be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + type: object + nodeConfigGroupId: + description: reference to the nodeConfigGroup that will be set for + nodes that are managed and autoscaled This Id is used to compute + the names of some Kubernetes resources, so it must be a safe value. + maxLength: 63 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + nodeLabelsSelector: + description: A label selector used to identify & manage Node objects + in the referenced NifiCluster. Any node matching this selector will + be managed by this autoscaler. Even if that node was previously + statically defined. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + readOnlyConfig: + description: the node readOnlyConfig for each node in the node group + properties: + additionalSharedEnvs: + description: AdditionalSharedEnvs define a set of additional env + variables that will shared between all init containers and containers + in the pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + authorizerConfig: + description: Authorizer configuration that will be applied to + the node. + properties: + replaceTemplateConfigMap: + description: 'A replacement authorizers.xml template configuration + that will replace the default template. NOTE: this is a + template as seen in authorizers.go.' + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceTemplateSecretConfig: + description: 'a replacement authorizers.xml template configuration + that will replace the default template and replaceConfigMap. + NOTE: this is a template as seen in authorizers.go.' + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + bootstrapNotificationServicesConfig: + description: BootstrapNotificationServices configuration that + will be applied to the node. + properties: + replaceConfigMap: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. 
+ type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: bootstrap_notifications_services.xml configuration + that will replace the one produced based on template and + overrideConfigMap + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + bootstrapProperties: + description: BootstrapProperties configuration that will be applied + to the node. + properties: + nifiJvmMemory: + description: JVM memory settings + type: string + overrideConfigMap: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template and + configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template and + configurations. + type: string + overrideSecretConfig: + description: Additionnals bootstrap.properties configuration + that will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. 
+ type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + logbackConfig: + description: Logback configuration that will be applied to the + node. + properties: + replaceConfigMap: + description: logback.xml configuration that will replace the + one produced based on template + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + replaceSecretConfig: + description: logback.xml configuration that will replace the + one produced based on template and overrideConfigMap + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + maximumEventDrivenThreadCount: + description: MaximumEventDrivenThreadCount define the maximum + number of threads for event driven processors available to the + system. + format: int32 + type: integer + maximumTimerDrivenThreadCount: + description: MaximumTimerDrivenThreadCount define the maximum + number of threads for timer driven processors available to the + system. + format: int32 + type: integer + nifiProperties: + description: NifiProperties configuration that will be applied + to the node. 
+ properties: + authorizer: + description: Indicates which of the configured authorizers + in the authorizers.xml file to use https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration + type: string + needClientAuth: + description: Nifi security client auth + type: boolean + overrideConfigMap: + description: Additionnals nifi.properties configuration that + will override the one produced based on template and configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals nifi.properties configuration that + will override the one produced based on template, configurations + and overrideConfigMap. + type: string + overrideSecretConfig: + description: Additionnals nifi.properties configuration that + will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + webProxyHosts: + description: A comma separated list of allowed HTTP Host header + values to consider when NiFi is running securely and will + be receiving requests to a different host[:port] than it + is bound to. https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties + items: + type: string + type: array + type: object + zookeeperProperties: + description: ZookeeperProperties configuration that will be applied + to the node. 
+ properties: + overrideConfigMap: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template and + configuration + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + overrideConfigs: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template and + configurations. + type: string + overrideSecretConfig: + description: Additionnals zookeeper.properties configuration + that will override the one produced based on template, configurations, + overrideConfigMap and overrideConfigs. + properties: + data: + description: The key of the value,in data content, that + we want use. + type: string + name: + description: Name of the configmap that we want to refer. + type: string + namespace: + description: Namespace where is located the secret that + we want to refer. + type: string + required: + - data + - name + type: object + type: object + type: object + replicas: + default: 0 + description: current number of replicas expected for the node config + group + format: int32 + type: integer + upscaleStrategy: + description: The strategy to use when scaling up the nifi cluster + enum: + - graceful + - simple + type: string + required: + - clusterRef + - nodeConfigGroupId + - nodeLabelsSelector + type: object + status: + description: NifiNodeGroupAutoscalerStatus defines the observed state + of NifiNodeGroupAutoscaler + properties: + replicas: + description: the current number of replicas in this cluster + format: int32 + type: integer + selector: + description: label selectors for cluster child pods. 
HPA uses this + to identify pod replicas + type: string + state: + description: The state of this autoscaler + type: string + required: + - replicas + - selector + - state + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/helm/nifikop/templates/role.yaml b/helm/nifikop/templates/role.yaml index 3fba7f2377..1f12beb939 100644 --- a/helm/nifikop/templates/role.yaml +++ b/helm/nifikop/templates/role.yaml @@ -83,6 +83,7 @@ rules: - "nifidataflows" - "nifiregistryclients" - "nifiparametercontexts" + - "nifinodegroupautoscalers" verbs: - create - delete @@ -92,6 +93,18 @@ rules: - update - watch - deletecollection +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - cert-manager.io resources: @@ -117,6 +130,7 @@ rules: - nifidataflows/status - nifiregistryclients/status - nifiparametercontexts/status + - nifinodegroupautoscalers/status verbs: - get - update diff --git a/helm/nifikop/values.yaml b/helm/nifikop/values.yaml index a0898c357b..c17ca17ef5 100644 --- a/helm/nifikop/values.yaml +++ b/helm/nifikop/values.yaml @@ -2,7 +2,8 @@ ## image: repository: ghcr.io/konpyutaika/docker-images/nifikop - tag: v0.12.0-k8s-1.20-release + + tag: v0.13.0-k8s-1.20-release pullPolicy: Always imagePullSecrets: enabled: false diff --git a/main.go b/main.go index 891a83ca06..31de06372d 100644 --- a/main.go +++ b/main.go @@ -8,9 +8,9 @@ import ( "github.com/go-logr/zapr" certv1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" - "github.com/konpyutaika/nifikop/pkg/common" "sigs.k8s.io/controller-runtime/pkg/cache" + "github.com/konpyutaika/nifikop/pkg/common" // Import all Kubernetes client auth plugins (e.g. 
Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -23,6 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/healthz" "github.com/konpyutaika/nifikop/api/v1alpha1" + nifiv1alpha1 "github.com/konpyutaika/nifikop/api/v1alpha1" "github.com/konpyutaika/nifikop/controllers" // +kubebuilder:scaffold:imports ) @@ -35,6 +36,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(nifiv1alpha1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme } @@ -182,6 +184,18 @@ func main() { os.Exit(1) } + if err = (&controllers.NifiNodeGroupAutoscalerReconciler{ + Client: mgr.GetClient(), + APIReader: mgr.GetAPIReader(), + Scheme: mgr.GetScheme(), + Log: *logger.Named("controllers").Named("NifiNodeGroupAutoscaler"), + Recorder: mgr.GetEventRecorderFor("nifi-node-group-autoscaler"), + RequeueInterval: multipliers.NodeGroupAutoscalerRequeueInterval, + RequeueOffset: multipliers.RequeueOffset, + }).SetupWithManager(mgr); err != nil { + logger.Error("unable to create controller", zap.String("controller", "NifiNodeGroupAutoscaler"), zap.Error(err)) + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { diff --git a/pkg/autoscale/strategy.go b/pkg/autoscale/strategy.go new file mode 100644 index 0000000000..d214ca58ac --- /dev/null +++ b/pkg/autoscale/strategy.go @@ -0,0 +1,141 @@ +package autoscale + +import ( + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sort" +) + +type HorizontalDownscaleStrategy interface { + Type() v1alpha1.ClusterScalingStrategy + + // returns the set of "numNodesToRemove" nodes that should be removed from the cluster + ScaleDown(numNodesToRemove int32) (nodesToRemove []v1alpha1.Node, err error) +} + +type 
HorizontalUpscaleStrategy interface { + Type() v1alpha1.ClusterScalingStrategy + + // returns the set of "numNodesToAdd" nodes that should be added to the cluster + ScaleUp(numNodesToAdd int32) (newNodes []v1alpha1.Node, err error) +} + +// LIFO downscale strategy +// Nodes are added by monotonically increasing nodeId, so LIFO is simply a strategy where the highest ID nodes are removed first. +type LIFOHorizontalDownscaleStrategy struct { + NifiCluster *v1alpha1.NifiCluster + NifiNodeGroupAutoscaler *v1alpha1.NifiNodeGroupAutoscaler +} + +// returns the set of "numNodesToRemove" nodes that should be removed from the cluster +func (lifo *LIFOHorizontalDownscaleStrategy) ScaleDown(numNodesToRemove int32) (nodesToRemove []v1alpha1.Node, err error) { + // we use the creation time-ordered nodes here so that we can remove the last nodes added to the cluster + currentNodes, err := getManagedNodes(lifo.NifiNodeGroupAutoscaler, lifo.NifiCluster.GetCreationTimeOrderedNodes()) + if err != nil { + return nil, err + } + numberOfCurrentNodes := int32(len(currentNodes)) + if numNodesToRemove > numberOfCurrentNodes || numNodesToRemove == 0 { + return []v1alpha1.Node{}, nil + } + + nodesToRemove = []v1alpha1.Node{} + nodesToRemove = append(nodesToRemove, currentNodes[numberOfCurrentNodes-numNodesToRemove:]...) 
+ + // the last are the nodes which need to be removed + return nodesToRemove, nil +} + +func (lifo *LIFOHorizontalDownscaleStrategy) Type() v1alpha1.ClusterScalingStrategy { + return v1alpha1.LIFOClusterDownscaleStrategy +} + +// Simple upscale strategy +// A simple cluster upscale operation is simply adding a node to the existing node set +type SimpleHorizontalUpscaleStrategy struct { + NifiCluster *v1alpha1.NifiCluster + NifiNodeGroupAutoscaler *v1alpha1.NifiNodeGroupAutoscaler +} + +func (simple *SimpleHorizontalUpscaleStrategy) Type() v1alpha1.ClusterScalingStrategy { + return v1alpha1.SimpleClusterUpscaleStrategy +} + +// returns the set of "numNodesToAdd" nodes that should be added to the cluster +func (simple *SimpleHorizontalUpscaleStrategy) ScaleUp(numNodesToAdd int32) (newNodes []v1alpha1.Node, err error) { + if numNodesToAdd == 0 { + return newNodes, nil + } + autoscalingNodeLabels, err := simple.NifiNodeGroupAutoscaler.Spec.NifiNodeGroupSelectorAsMap() + if err != nil { + return nil, err + } + + // when computing new node IDs, we consider the entire cluster so that we don't inadvertntly re-use existing IDs + newNodeIds := ComputeNewNodeIds(simple.NifiCluster.Spec.Nodes, numNodesToAdd) + + for _, id := range newNodeIds { + newNodes = append(newNodes, v1alpha1.Node{ + Id: id, + NodeConfigGroup: simple.NifiNodeGroupAutoscaler.Spec.NodeConfigGroupId, + ReadOnlyConfig: simple.NifiNodeGroupAutoscaler.Spec.ReadOnlyConfig, + Labels: autoscalingNodeLabels, + NodeConfig: simple.NifiNodeGroupAutoscaler.Spec.NodeConfig, + }) + } + return +} + +// filter the set of provided nodes by the autoscaler's node selector +func getManagedNodes(autoscaler *v1alpha1.NifiNodeGroupAutoscaler, nodes []v1alpha1.Node) (managedNodes []v1alpha1.Node, err error) { + selector, err := metav1.LabelSelectorAsSelector(autoscaler.Spec.NodeLabelsSelector) + if err != nil { + return nil, err + } + + for _, node := range nodes { + if selector.Matches(labels.Set(node.Labels)) { + managedNodes 
= append(managedNodes, node) + } + } + return managedNodes, nil +} + +// New nodes are assigned an Id in the following manner: +// +// - Assigned node Ids will always be a non-negative integer starting with zero +// +// - extract and sort the node Ids in the provided node list +// +// - iterate through the node Id list starting with zero. For any unassigned node Id, assign it +// +// - return the list of assigned node Ids +func ComputeNewNodeIds(nodes []v1alpha1.Node, numNewNodes int32) []int32 { + nodeIdList := util.NodesToIdList(nodes) + sort.Slice(nodeIdList, func(i, j int) bool { + return nodeIdList[i] < nodeIdList[j] + }) + + newNodeIds := []int32{} + index := int32(0) + + // assign IDs in any gaps in the existing node list, starting with zero + var i int32 + for i = int32(0); i < nodeIdList[len(nodeIdList)-1] && int32(len(newNodeIds)) < numNewNodes; i++ { + if nodeIdList[index] == i { + index++ + } else { + newNodeIds = append(newNodeIds, i) + } + } + + // add any remaining nodes needed + remainder := numNewNodes - int32(len(newNodeIds)) + for j := int32(1); j <= remainder; j++ { + newNodeIds = append(newNodeIds, i+j) + } + + return newNodeIds +} diff --git a/pkg/autoscale/strategy_test.go b/pkg/autoscale/strategy_test.go new file mode 100644 index 0000000000..5d9e9186af --- /dev/null +++ b/pkg/autoscale/strategy_test.go @@ -0,0 +1,219 @@ +package autoscale + +import ( + "reflect" + "testing" + "time" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + time1 = v1.NewTime(time.Now().UTC().Add(time.Duration(5) * time.Hour)) + time2 = v1.NewTime(time.Now().UTC().Add(time.Duration(10) * time.Hour)) + time3 = v1.NewTime(time.Now().UTC().Add(time.Duration(15) * time.Hour)) + time4 = v1.NewTime(time.Now().UTC().Add(time.Duration(20) * time.Hour)) +) + +var lifo = LIFOHorizontalDownscaleStrategy{ + NifiNodeGroupAutoscaler: &v1alpha1.NifiNodeGroupAutoscaler{ + Spec: v1alpha1.NifiNodeGroupAutoscalerSpec{ + 
NodeLabelsSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"scale_me": "true"}, + }, + }, + }, + NifiCluster: &v1alpha1.NifiCluster{ + Spec: v1alpha1.NifiClusterSpec{ + Nodes: []v1alpha1.Node{ + {Id: 2, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 3, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 4, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 5, NodeConfigGroup: "other-group", Labels: map[string]string{"other_group": "true"}}, + }, + }, + Status: v1alpha1.NifiClusterStatus{ + NodesState: map[string]v1alpha1.NodeState{ + "2": { + CreationTime: &time1, + }, + "3": { + CreationTime: &time2, + }, + "4": { + CreationTime: &time3, + }, + "5": { + CreationTime: &time4, + }, + }, + }, + }, +} + +var simple = SimpleHorizontalUpscaleStrategy{ + NifiNodeGroupAutoscaler: &v1alpha1.NifiNodeGroupAutoscaler{ + Spec: v1alpha1.NifiNodeGroupAutoscalerSpec{ + NodeLabelsSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{"scale_me": "true"}, + }, + }, + }, + NifiCluster: &v1alpha1.NifiCluster{ + Spec: v1alpha1.NifiClusterSpec{ + Nodes: []v1alpha1.Node{ + {Id: 2, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 3, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 4, NodeConfigGroup: "scale-group", Labels: map[string]string{"scale_me": "true"}}, + {Id: 5, NodeConfigGroup: "other-group", Labels: map[string]string{"other_group": "true"}}, + }, + }, + Status: v1alpha1.NifiClusterStatus{ + NodesState: map[string]v1alpha1.NodeState{ + "2": { + CreationTime: &time1, + }, + "3": { + CreationTime: &time2, + }, + "4": { + CreationTime: &time3, + }, + "5": { + CreationTime: &time4, + }, + }, + }, + }, +} + +func TestLIFORemoveAllNodes(t *testing.T) { + // this also verifies if you remove more nodes than the current set, that it just returns an empty list. 
+ nodesToRemove, err := lifo.ScaleDown(100) + + if err != nil { + t.Error("Should not have encountered an error") + } + + if len(nodesToRemove) != 0 { + t.Error("nodesToRemove should have been empty") + } +} + +func TestLIFORemoveSomeNodes(t *testing.T) { + nodesToRemove, err := lifo.ScaleDown(2) + + if err != nil { + t.Error("Should not have encountered an error") + } + if len(nodesToRemove) != 2 { + t.Errorf("Did not remove correct number of nodes: %v+", nodesToRemove) + } + if nodesToRemove[0].Id != 3 && nodesToRemove[0].Id != 4 { + t.Errorf("Incorrect results. Nodes: %v+", nodesToRemove) + } +} + +func TestLIFORemoveOneNode(t *testing.T) { + nodesToRemove, err := lifo.ScaleDown(1) + + if err != nil { + t.Error("Should not have encountered an error") + } + if len(nodesToRemove) != 1 { + t.Errorf("Did not remove correct number of nodes: %v+", nodesToRemove) + } + + if nodesToRemove[0].Id != 4 { + t.Errorf("Incorrect results. Nodes: %v+", nodesToRemove) + } +} + +func TestLIFORemoveNoNodes(t *testing.T) { + nodesToRemove, err := lifo.ScaleDown(0) + + if err != nil { + t.Error("Should not have encountered an error") + } + if len(nodesToRemove) != 0 { + t.Error("nodesToRemove should have been empty") + } +} + +func TestSimpleAddNodes(t *testing.T) { + // 3 is enough to add nodes while considering other node config groups. nodes 0, 1, and 6 should be added. 
+ nodesToAdd, err := simple.ScaleUp(3) + + if err != nil { + t.Error("Should not have encountered an error") + } + + if len(nodesToAdd) != 3 { + t.Error("nodesToAdd should have been 2") + } + + if nodesToAdd[0].Id != 0 || nodesToAdd[1].Id != 1 || nodesToAdd[2].Id != 6 { + t.Errorf("nodesToAdd Ids are not correct: %v+", nodesToAdd) + } +} + +func TestSimpleAddNoNodes(t *testing.T) { + nodesToAdd, err := simple.ScaleUp(0) + + if err != nil { + t.Error("Should not have encountered an error") + } + + if len(nodesToAdd) != 0 { + t.Errorf("nodesToAdd should have been empty: %v+", nodesToAdd) + } +} + +func TestComputeNewNodeIds(t *testing.T) { + nodeList := []v1alpha1.Node{ + { + Id: 1, + }, + { + Id: 2, + }, + { + Id: 5, + }, + } + + // add more nodes than size of input node list + newNodeIds := ComputeNewNodeIds(nodeList, 5) + if len(newNodeIds) != 5 { + t.Errorf("There should be 5 new nodes. %v+", newNodeIds) + } + if !reflect.DeepEqual(newNodeIds, []int32{0, 3, 4, 6, 7}) { + t.Errorf("lists are not equal. %v+", newNodeIds) + } + + // add less nodes than size of input node list + newNodeIds = ComputeNewNodeIds(nodeList, 2) + + if len(newNodeIds) != 2 { + t.Errorf("There should be 2 new nodes. %v+", newNodeIds) + } + if !reflect.DeepEqual(newNodeIds, []int32{0, 3}) { + t.Errorf("lists are not equal. %v+", newNodeIds) + } + + // add same number of nodes than size of input node list + newNodeIds = ComputeNewNodeIds(nodeList, 3) + if len(newNodeIds) != 3 { + t.Errorf("There should be 3 new nodes. %v+", newNodeIds) + } + if !reflect.DeepEqual(newNodeIds, []int32{0, 3, 4}) { + t.Errorf("lists are not equal. %v+", newNodeIds) + } + + // add zero new nodes + newNodeIds = ComputeNewNodeIds(nodeList, 0) + if len(newNodeIds) != 0 { + t.Errorf("There should be 0 new nodes. 
%v+", newNodeIds) + } +} diff --git a/pkg/common/common.go b/pkg/common/common.go index 5fad96d1f2..1f0d7d04c1 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -61,13 +61,14 @@ func NewClusterConnection(log *zap.Logger, config *clientconfig.NifiConfig) (nod } type RequeueConfig struct { - UserRequeueInterval int - RegistryClientRequeueInterval int - ParameterContextRequeueInterval int - UserGroupRequeueInterval int - DataFlowRequeueInterval int - ClusterTaskRequeueIntervals map[string]int - RequeueOffset int + UserRequeueInterval int + RegistryClientRequeueInterval int + NodeGroupAutoscalerRequeueInterval int + ParameterContextRequeueInterval int + UserGroupRequeueInterval int + DataFlowRequeueInterval int + ClusterTaskRequeueIntervals map[string]int + RequeueOffset int } func NewRequeueConfig() *RequeueConfig { @@ -77,12 +78,13 @@ func NewRequeueConfig() *RequeueConfig { "CLUSTER_TASK_TIMEOUT_REQUEUE_INTERVAL": util.MustConvertToInt(util.GetEnvWithDefault("CLUSTER_TASK_TIMEOUT_REQUEUE_INTERVAL", "20"), "CLUSTER_TASK_TIMEOUT_REQUEUE_INTERVAL"), "CLUSTER_TASK_NOT_READY_REQUEUE_INTERVAL": util.MustConvertToInt(util.GetEnvWithDefault("CLUSTER_TASK_NOT_READY_REQUEUE_INTERVAL", "15"), "CLUSTER_TASK_NODES_UNREACHABLE_REQUEUE_INTERVAL"), }, - UserRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("USERS_REQUEUE_INTERVAL", "15"), "USERS_REQUEUE_INTERVAL"), - RegistryClientRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("REGISTRY_CLIENT_REQUEUE_INTERVAL", "15"), "REGISTRY_CLIENT_REQUEUE_INTERVAL"), - ParameterContextRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("PARAMETER_CONTEXT_REQUEUE_INTERVAL", "15"), "PARAMETER_CONTEXT_REQUEUE_INTERVAL"), - UserGroupRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("USER_GROUP_REQUEUE_INTERVAL", "15"), "USER_GROUP_REQUEUE_INTERVAL"), - DataFlowRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("DATAFLOW_REQUEUE_INTERVAL", "15"), "DATAFLOW_REQUEUE_INTERVAL"), 
- RequeueOffset: util.MustConvertToInt(util.GetEnvWithDefault("REQUEUE_OFFSET", "0"), "REQUEUE_OFFSET"), + UserRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("USERS_REQUEUE_INTERVAL", "15"), "USERS_REQUEUE_INTERVAL"), + NodeGroupAutoscalerRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("NODE_GROUP_AUTOSCALER_REQUEUE_INTERVAL", "15"), "NODE_GROUP_AUTOSCALER_REQUEUE_INTERVAL"), + RegistryClientRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("REGISTRY_CLIENT_REQUEUE_INTERVAL", "15"), "REGISTRY_CLIENT_REQUEUE_INTERVAL"), + ParameterContextRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("PARAMETER_CONTEXT_REQUEUE_INTERVAL", "15"), "PARAMETER_CONTEXT_REQUEUE_INTERVAL"), + UserGroupRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("USER_GROUP_REQUEUE_INTERVAL", "15"), "USER_GROUP_REQUEUE_INTERVAL"), + DataFlowRequeueInterval: util.MustConvertToInt(util.GetEnvWithDefault("DATAFLOW_REQUEUE_INTERVAL", "15"), "DATAFLOW_REQUEUE_INTERVAL"), + RequeueOffset: util.MustConvertToInt(util.GetEnvWithDefault("REQUEUE_OFFSET", "0"), "REQUEUE_OFFSET"), } } diff --git a/pkg/k8sutil/resource.go b/pkg/k8sutil/resource.go index a3b2c6c50f..79c7492adc 100644 --- a/pkg/k8sutil/resource.go +++ b/pkg/k8sutil/resource.go @@ -116,6 +116,7 @@ func Reconcile(log zap.Logger, client runtimeClient.Client, desired runtimeClien zap.String("name", desired.GetName()), zap.String("namespace", desired.GetNamespace()), zap.String("kind", desired.GetObjectKind().GroupVersionKind().Kind)) + } return nil } diff --git a/pkg/k8sutil/status.go b/pkg/k8sutil/status.go index 9562a2ce1f..0c1a478347 100644 --- a/pkg/k8sutil/status.go +++ b/pkg/k8sutil/status.go @@ -43,6 +43,12 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {InitClusterNode: s}} case bool: cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {PodIsReady: s}} + case 
metav1.Time: + if cluster.Status.NodesState[nodeId].CreationTime == nil { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {CreationTime: &s}} + } else { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {LastUpdatedTime: s}} + } } } else if val, ok := cluster.Status.NodesState[nodeId]; ok { switch s := state.(type) { @@ -54,6 +60,12 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC val.InitClusterNode = s case bool: val.PodIsReady = s + case metav1.Time: + if cluster.Status.NodesState[nodeId].CreationTime == nil { + val.CreationTime = &s + } else { + val.LastUpdatedTime = s + } } cluster.Status.NodesState[nodeId] = val } else { @@ -66,6 +78,12 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{InitClusterNode: s} case bool: cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{PodIsReady: s} + case metav1.Time: + if cluster.Status.NodesState[nodeId].CreationTime == nil { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {CreationTime: &s}} + } else { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {LastUpdatedTime: s}} + } } } } @@ -98,6 +116,12 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {InitClusterNode: s}} case bool: cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {PodIsReady: s}} + case metav1.Time: + if cluster.Status.NodesState[nodeId].CreationTime == nil { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {CreationTime: &s}} + } else { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {LastUpdatedTime: s}} + } } } else if val, ok := cluster.Status.NodesState[nodeId]; ok { switch s := state.(type) { @@ -109,6 +133,12 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC 
val.InitClusterNode = s case bool: val.PodIsReady = s + case metav1.Time: + if cluster.Status.NodesState[nodeId].CreationTime == nil { + val.CreationTime = &s + } else { + val.LastUpdatedTime = s + } } cluster.Status.NodesState[nodeId] = val } else { @@ -121,6 +151,12 @@ func UpdateNodeStatus(c client.Client, nodeIds []string, cluster *v1alpha1.NifiC cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{InitClusterNode: s} case bool: cluster.Status.NodesState[nodeId] = v1alpha1.NodeState{PodIsReady: s} + case metav1.Time: + if cluster.Status.NodesState[nodeId].CreationTime == nil { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {CreationTime: &s}} + } else { + cluster.Status.NodesState = map[string]v1alpha1.NodeState{nodeId: {LastUpdatedTime: s}} + } } } } diff --git a/pkg/resources/nifi/nifi.go b/pkg/resources/nifi/nifi.go index 5ae8c66f2f..790be5bf59 100644 --- a/pkg/resources/nifi/nifi.go +++ b/pkg/resources/nifi/nifi.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "time" "github.com/konpyutaika/nifikop/pkg/clientwrappers/dataflow" "github.com/konpyutaika/nifikop/pkg/clientwrappers/scale" @@ -39,7 +40,6 @@ const ( nodeSecretVolumeMount = "node-config" nodeTmp = "node-tmp" - nifiDataVolumeMount = "nifi-data" serverKeystoreVolume = "server-ks-files" serverKeystorePath = "/var/run/secrets/java.io/keystores/server" @@ -152,7 +152,6 @@ func (r *Reconciler) Reconcile(log zap.Logger) error { if err != nil { return errors.WrapIfWithDetails(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind()) } - } o := r.secretConfig(node.Id, nodeConfig, serverPass, clientPass, superUsers, log) @@ -173,7 +172,7 @@ func (r *Reconciler) Reconcile(log zap.Logger) error { return errors.WrapIfWithDetails(err, "failed to reconcile resource", "resource", o.GetObjectKind().GroupVersionKind()) } } - o = r.pod(node.Id, nodeConfig, pvcs, log) + o = r.pod(node, nodeConfig, pvcs, log) err, isReady := r.reconcileNifiPod(log, 
o.(*corev1.Pod)) if err != nil { return err @@ -345,6 +344,23 @@ OUTERLOOP: return errors.WrapIfWithDetails(err, "could not update status for node(s)", "id(s)", node.Labels["nodeId"]) } + for _, volume := range node.Spec.Volumes { + if strings.HasPrefix(volume.Name, nifiutil.NifiDataVolumeMount) { + err = r.Client.Delete(context.TODO(), &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ + Name: volume.PersistentVolumeClaim.ClaimName, + Namespace: r.NifiCluster.Namespace, + }}) + if err != nil { + if apierrors.IsNotFound(err) { + // can happen when node was not fully initialized and now is deleted + log.Info(fmt.Sprintf("PVC for Node %s not found. Continue", node.Labels["nodeId"])) + } + + return errors.WrapIfWithDetails(err, "could not delete pvc for node", "id", node.Labels["nodeId"]) + } + } + } + err = r.Client.Delete(context.TODO(), &node) if err != nil { return errors.WrapIfWithDetails(err, "could not delete node", "id", node.Labels["nodeId"]) @@ -365,29 +381,11 @@ OUTERLOOP: return errors.WrapIfWithDetails(err, "could not delete service for node", "id", node.Labels["nodeId"]) } } - - for _, volume := range node.Spec.Volumes { - if strings.HasPrefix(volume.Name, nifiDataVolumeMount) { - err = r.Client.Delete(context.TODO(), &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{ - Name: volume.PersistentVolumeClaim.ClaimName, - Namespace: r.NifiCluster.Namespace, - }}) - if err != nil { - if apierrors.IsNotFound(err) { - // can happen when node was not fully initialized and now is deleted - log.Info("PVC for Node not found. 
Continue", - zap.String("nodeId", node.Labels["nodeId"])) - } - - return errors.WrapIfWithDetails(err, "could not delete pvc for node", "id", node.Labels["nodeId"]) - } - } - } - + err = k8sutil.UpdateNodeStatus(r.Client, []string{node.Labels["nodeId"]}, r.NifiCluster, v1alpha1.GracefulActionState{ ActionStep: v1alpha1.RemovePodStatus, - State: v1alpha1.GracefulDownscaleRunning, + State: v1alpha1.GracefulDownscaleSucceeded, TaskStarted: r.NifiCluster.Status.NodesState[node.Labels["nodeId"]].GracefulActionState.TaskStarted}, log) if err != nil { @@ -578,6 +576,13 @@ func (r *Reconciler) reconcileNifiPod(log zap.Logger, desiredPod *corev1.Pod) (e statusErr, "updating status for resource failed", "kind", desiredType), false } + // set node creation time + statusErr = k8sutil.UpdateNodeStatus(r.Client, []string{desiredPod.Labels["nodeId"]}, r.NifiCluster, metav1.NewTime(time.Now().UTC()), log) + if statusErr != nil { + return errorfactory.New(errorfactory.StatusUpdateError{}, + statusErr, "failed to update node status creation time", "kind", desiredType), false + } + if val, ok := r.NifiCluster.Status.NodesState[desiredPod.Labels["nodeId"]]; ok && val.GracefulActionState.State != v1alpha1.GracefulUpscaleSucceeded { gracefulActionState := v1alpha1.GracefulActionState{ErrorMessage: "", State: v1alpha1.GracefulUpscaleSucceeded} @@ -613,7 +618,7 @@ func (r *Reconciler) reconcileNifiPod(log zap.Logger, desiredPod *corev1.Pod) (e } } else { return errorfactory.New(errorfactory.TooManyResources{}, errors.New("reconcile failed"), - "more then one matching pod found", "labels", matchingLabels), false + "more than one matching pod found", "labels", matchingLabels), false } if err == nil { @@ -706,6 +711,7 @@ func (r *Reconciler) reconcileNifiPod(log zap.Logger, desiredPod *corev1.Pod) (e } } + log.Info(fmt.Sprintf("Deleting pod %s", currentPod.Name)) err = r.Client.Delete(context.TODO(), currentPod) if err != nil { return errorfactory.New(errorfactory.APIFailure{}, diff --git 
a/pkg/resources/nifi/pod.go b/pkg/resources/nifi/pod.go index b89514453d..138cd34d0f 100644 --- a/pkg/resources/nifi/pod.go +++ b/pkg/resources/nifi/pod.go @@ -5,7 +5,6 @@ import ( "sort" "strings" - configcommon "github.com/konpyutaika/nifikop/pkg/nificlient/config/common" "go.uber.org/zap" runtimeClient "sigs.k8s.io/controller-runtime/pkg/client" @@ -40,7 +39,7 @@ const ( ContainerName string = "nifi" ) -func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev1.PersistentVolumeClaim, log zap.Logger) runtimeClient.Object { +func (r *Reconciler) pod(node v1alpha1.Node, nodeConfig *v1alpha1.NodeConfig, pvcs []corev1.PersistentVolumeClaim, log zap.Logger) runtimeClient.Object { zkAddress := r.NifiCluster.Spec.ZKAddress zkHostname := zk.GetHostnameAddress(zkAddress) @@ -64,7 +63,7 @@ func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev } if r.NifiCluster.Spec.ListenersConfig.SSLSecrets != nil { - volume = append(volume, generateVolumesForSSL(r.NifiCluster, id)...) + volume = append(volume, generateVolumesForSSL(r.NifiCluster, node.Id)...) volumeMount = append(volumeMount, generateVolumeMountForSSL()...) 
} @@ -75,7 +74,7 @@ func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev //ConfigMap: &corev1.ConfigMapVolumeSource{ Secret: &corev1.SecretVolumeSource{ //LocalObjectReference: corev1.LocalObjectReference{Name: fmt.Sprintf(templates.NodeConfigTemplate+"-%d", r.NifiCluster.Name, id)}, - SecretName: fmt.Sprintf(templates.NodeConfigTemplate+"-%d", r.NifiCluster.Name, id), + SecretName: fmt.Sprintf(templates.NodeConfigTemplate+"-%d", r.NifiCluster.Name, node.Id), DefaultMode: util.Int32Pointer(0644), }, }, @@ -120,7 +119,8 @@ func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev r.NifiCluster.Spec.Pod.Labels, nodeConfig.GetPodLabels(), nifiutil.LabelsForNifi(r.NifiCluster.Name), - {"nodeId": fmt.Sprintf("%d", id)}, + node.Labels, + {"nodeId": fmt.Sprintf("%d", node.Id)}, } // merge host aliases together, preferring the aliases in the nodeConfig @@ -136,7 +136,7 @@ func (r *Reconciler) pod(id int32, nodeConfig *v1alpha1.NodeConfig, pvcs []corev pod := &corev1.Pod{ //ObjectMeta: templates.ObjectMetaWithAnnotations( ObjectMeta: templates.ObjectMetaWithGeneratedNameAndAnnotations( - nifiutil.ComputeNodeName(id, r.NifiCluster.Name), + nifiutil.ComputeNodeName(node.Id, r.NifiCluster.Name), util.MergeLabels(labelsToMerge...), util.MergeAnnotations(anntotationsToMerge...), r.NifiCluster, ), @@ -165,7 +165,7 @@ done`, PodAntiAffinity: generatePodAntiAffinity(r.NifiCluster.Name, r.NifiCluster.Spec.OneNifiNodePerNode), }, TopologySpreadConstraints: r.NifiCluster.Spec.TopologySpreadConstraints, - Containers: r.injectAdditionalEnvVars(r.generateContainers(nodeConfig, id, podVolumeMounts, zkAddress)), + Containers: r.injectAdditionalEnvVars(r.generateContainers(nodeConfig, node.Id, podVolumeMounts, zkAddress)), HostAliases: allHostAliases, Volumes: podVolumes, RestartPolicy: corev1.RestartPolicyNever, @@ -181,7 +181,7 @@ done`, } //if r.NifiCluster.Spec.Service.HeadlessEnabled { - pod.Spec.Hostname = nifiutil.ComputeNodeName(id, 
r.NifiCluster.Name) + pod.Spec.Hostname = nifiutil.ComputeNodeName(node.Id, r.NifiCluster.Name) pod.Spec.Subdomain = nifiutil.ComputeRequestNiFiAllNodeService(r.NifiCluster.Name, r.NifiCluster.Spec.Service.GetServiceTemplate()) //} @@ -392,43 +392,6 @@ func (r *Reconciler) createNifiNodeContainer(nodeConfig *v1alpha1.NodeConfig, id GetServerPort(r.NifiCluster.Spec.ListenersConfig)) } - failCondition := "" - - if val, ok := r.NifiCluster.Status.NodesState[fmt.Sprint(id)]; !ok || (val.InitClusterNode != v1alpha1.IsInitClusterNode && - (val.GracefulActionState.State == v1alpha1.GracefulUpscaleRequired || - val.GracefulActionState.State == v1alpha1.GracefulUpscaleRunning)) { - failCondition = `else - echo fail to request cluster - exit 1 -` - } - - requestClusterStatus := fmt.Sprintf("curl --fail -v http://%s/nifi-api/controller/cluster > $NIFI_BASE_DIR/cluster.state", - nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(r.NifiCluster)) - - if configcommon.UseSSL(r.NifiCluster) { - requestClusterStatus = fmt.Sprintf( - "curl --fail -kv --cert /var/run/secrets/java.io/keystores/client/tls.crt --key /var/run/secrets/java.io/keystores/client/tls.key https://%s/nifi-api/controller/cluster > $NIFI_BASE_DIR/cluster.state", - nifiutil.GenerateRequestNiFiAllNodeAddressFromCluster(r.NifiCluster)) - } - - removesFileAction := fmt.Sprintf(`if %s; then - echo "Successfully query NiFi cluster" - %s - echo "state $STATUS" - if [[ -z "$STATUS" ]]; then - echo "Removing previous exec setup" - if [ -f "$NIFI_BASE_DIR/data/users.xml" ]; then rm -f $NIFI_BASE_DIR/data/users.xml; fi - if [ -f "$NIFI_BASE_DIR/data/authorizations.xml" ]; then rm -f $NIFI_BASE_DIR/data/authorizations.xml; fi - if [ -f " $NIFI_BASE_DIR/data/flow.xml.gz" ]; then rm -f $NIFI_BASE_DIR/data/flow.xml.gz; fi - fi -%s -fi -rm -f $NIFI_BASE_DIR/cluster.state `, - requestClusterStatus, - "STATUS=$(jq -r \".cluster.nodes[] | select(.address==\\\"$(hostname -f)\\\") | .status\" $NIFI_BASE_DIR/cluster.state)", - 
failCondition) - nodeAddress := nifiutil.ComputeHostListenerNodeAddress( id, r.NifiCluster.Name, r.NifiCluster.Namespace, r.NifiCluster.Spec.ListenersConfig.GetClusterDomain(), r.NifiCluster.Spec.ListenersConfig.UseExternalDNS, r.NifiCluster.Spec.ListenersConfig.InternalListeners, @@ -456,8 +419,7 @@ echo "Hostname is successfully binded withy IP adress"`, nodeAddress, nodeAddres } command := []string{"bash", "-ce", fmt.Sprintf(`cp ${NIFI_HOME}/tmp/* ${NIFI_HOME}/conf/ %s -%s -exec bin/nifi.sh run`, resolveIp, removesFileAction)} +exec bin/nifi.sh run`, resolveIp)} return corev1.Container{ Name: ContainerName, diff --git a/pkg/resources/nifi/pvc.go b/pkg/resources/nifi/pvc.go index e02613daea..2ecd5e63f2 100644 --- a/pkg/resources/nifi/pvc.go +++ b/pkg/resources/nifi/pvc.go @@ -23,7 +23,7 @@ func (r *Reconciler) pvc(id int32, storage v1alpha1.StorageConfig, log zap.Logge "storageName": storage.Name, }, ), - map[string]string{"mountPath": storage.MountPath, "storageName": storage.Name}, r.NifiCluster), + map[string]string{"mountPath": storage.MountPath, "storageName": fmt.Sprintf(templates.StorageNameTemplate, nifiutil.NifiDataVolumeMount, storage.Name)}, r.NifiCluster), Spec: *storage.PVCSpec, } } diff --git a/pkg/resources/templates/variables.go b/pkg/resources/templates/variables.go index 249c1f2e17..763e106655 100644 --- a/pkg/resources/templates/variables.go +++ b/pkg/resources/templates/variables.go @@ -2,6 +2,7 @@ package templates const ( NodeConfigTemplate = "%s-config" + StorageNameTemplate = "%s-%s" NodeStorageTemplate = "%s-%d-%s-storage-" ExternalClusterSecretTemplate = "%s-basic-secret" ) diff --git a/pkg/util/nifi/common.go b/pkg/util/nifi/common.go index b8a40dc443..2cabfd6da4 100644 --- a/pkg/util/nifi/common.go +++ b/pkg/util/nifi/common.go @@ -15,7 +15,8 @@ const ( NodeNameTemplate = PrefixNodeNameTemplate + RootNodeNameTemplate + SuffixNodeNameTemplate // TimeStampLayout defines the date format used. 
- TimeStampLayout = "Mon, 2 Jan 2006 15:04:05 GMT" + TimeStampLayout = "Mon, 2 Jan 2006 15:04:05 GMT" + NifiDataVolumeMount = "nifi-data" ) // ParseTimeStampToUnixTime parses the given CC timeStamp to time format diff --git a/pkg/util/util.go b/pkg/util/util.go index 7c216c1d2c..4dcede52bf 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -222,6 +222,31 @@ func NodesToIdList(nodes []v1alpha1.Node) (ids []int32) { return } +func NodesToIdMap(nodes []v1alpha1.Node) (nodeMap map[int32]v1alpha1.Node) { + nodeMap = make(map[int32]v1alpha1.Node) + for _, node := range nodes { + nodeMap[node.Id] = node + } + return +} + +// SubtractNodes removes nodesToRemove from the originalNodes list by the node's Ids and returns the result +func SubtractNodes(originalNodes []v1alpha1.Node, nodesToRemove []v1alpha1.Node) (results []v1alpha1.Node) { + if len(originalNodes) == 0 || len(nodesToRemove) == 0 { + return originalNodes + } + nodesToRemoveMap := NodesToIdMap(nodesToRemove) + results = []v1alpha1.Node{} + + for _, node := range originalNodes { + if _, found := nodesToRemoveMap[node.Id]; !found { + // results are those which are _not_ in the nodesToRemove map + results = append(results, node) + } + } + return results +} + // computes the max between 2 ints func Max(x, y int) int { if x < y { diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index 852420ec63..4f0b111fe6 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -1,11 +1,46 @@ package util import ( - "testing" - + "github.com/konpyutaika/nifikop/api/v1alpha1" corev1 "k8s.io/api/core/v1" + "testing" ) +func TestSubtractNodes(t *testing.T) { + sourceList := []v1alpha1.Node{ + { + Id: 1, + }, + { + Id: 2, + }, + { + Id: 3, + }, + } + + nodesToSubtract := []v1alpha1.Node{ + { + Id: 3, + }, + } + + // subtract 1 node + if results := SubtractNodes(sourceList, nodesToSubtract); len(results) != 2 { + t.Error("There should be two nodes remaining") + } + + // subtract empty list + if results := 
SubtractNodes(sourceList, []v1alpha1.Node{}); len(results) != 3 { + t.Error("there should be 3 results") + } + + // subtract all nodes + if results := SubtractNodes(sourceList, sourceList); len(results) != 0 { + t.Error("There should be two nodes remaining") + } +} + func TestMergeHostAliasesOverride(t *testing.T) { globalAliases := []corev1.HostAlias{ { diff --git a/site/docs/1_concepts/3_features.md b/site/docs/1_concepts/3_features.md index 6ccd1e78d8..60c7106862 100644 --- a/site/docs/1_concepts/3_features.md +++ b/site/docs/1_concepts/3_features.md @@ -43,7 +43,7 @@ In a cloud native approach, we are looking for important management features, wh Without the management of users and access policies associated, it was not possible to have a fully automated NiFi cluster setup due to : - **Node scaling :** when a new node joins the cluster it needs to have some roles like `proxy user request`, `view data` etc., by managing users and access policies we can easily create a user for this node with the right accesses. -- **Operator admin rigth :** For the operator to manage efficiently the cluster it needs a lot of rights as `deploying process groups`, `empty the queues` etc., these rights are not available by default when you set a user as [InitialAdmin](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#initial-admin-identity). Once again by giving the ability to define users and access policies we go through this. +- **Operator admin rights :** For the operator to manage efficiently the cluster it needs a lot of rights as `deploying process groups`, `empty the queues` etc., these rights are not available by default when you set a user as [InitialAdmin](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#initial-admin-identity). Once again by giving the ability to define users and access policies we go through this. 
- **User's access :** as seen just below we need to define the operator as `InitialAdmin`, in this situation there is no more users that can access to the web UI to manually give access to other users. That's why we extend the `InitialAdmin` concept into the operator, giving the ability to define a list of users as admins. In addition to these requirements to have a fully automated and managed cluster, we introduced some useful features : @@ -54,4 +54,14 @@ In addition to these requirements to have a fully automated and managed cluster, - **Admins :** a group giving access to everything on the NiFi Cluster, - **Readers :** a group giving access as viewer on the NiFi Cluster. -By introducing this feature we are giving you the ability to fully automate your deployment, from the NiFi Cluster to your managed NiFi Dataflow. \ No newline at end of file +By introducing this feature we are giving you the ability to fully automate your deployment, from the NiFi Cluster to your managed NiFi Dataflow. + +## Automatic horizontal NiFi cluster scaling via CRD + +NiFiKop supports automatically horizontally scaling `NifiCluster` node groups with a `NifiNodeGroupAutoscaler` custom resource. + +- **Kubernetes native :** The `NifiNodeGroupAutoscaler` controller implements the [Kubernetes scale subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource) and creates a Kubernetes [`HorizontalPodAutoscaler`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to automatically scale the pods that NiFiKop creates for `NifiCluster` deployments. +- **Metrics-driven autoscaling :** The `HorizontalPodAutoscaler` can be driven by pod usage metrics (e.g. RAM, CPU) or through NiFi application metrics scraped by Prometheus. 
+- **Flexible NifiCluster node group autoscaling :** The `NifiNodeGroupAutoscaler` scales specific node groups in your `NifiCluster` and you may have as many autoscalers as you like per `NifiCluster` deployment. For example, a `NifiNodeGroupAutoscaler` may manage high-memory or high-cpu sets of nodes for volume burst scenarios or it may manage every node in your cluster. + +Through this set of features, you may elect to have NiFiKop configure automatic horizontal autoscaling for any subset of nodes in your `NifiCluster` deployment. \ No newline at end of file diff --git a/site/docs/2_setup/1_getting_started.md b/site/docs/2_setup/1_getting_started.md index 23634df086..59c6051ae7 100644 --- a/site/docs/2_setup/1_getting_started.md +++ b/site/docs/2_setup/1_getting_started.md @@ -109,8 +109,8 @@ Now deploy the helm chart : helm install nifikop \ oci://ghcr.io/konpyutaika/helm-charts/nifikop \ --namespace=nifi \ - --version 0.12.0 \ - --set image.tag=v0.12.0-release \ + --version 0.13.0 \ + --set image.tag=v0.13.0-release \ --set resources.requests.memory=256Mi \ --set resources.requests.cpu=250m \ --set resources.limits.memory=256Mi \ diff --git a/site/docs/2_setup/3_install/1_customizable_install_with_helm.md b/site/docs/2_setup/3_install/1_customizable_install_with_helm.md index 2d49221137..41cf9a4813 100644 --- a/site/docs/2_setup/3_install/1_customizable_install_with_helm.md +++ b/site/docs/2_setup/3_install/1_customizable_install_with_helm.md @@ -32,7 +32,7 @@ The following tables lists the configurable parameters of the NiFi Operator Helm | Parameter | Description | Default | |----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------| | `image.repository` | Image | `ghcr.io/konpyutaika/docker-images/nifikop` | -| `image.tag` | Image tag | `v0.12.0-release` | 
+| `image.tag` | Image tag | `v0.13.0-release` | | `image.pullPolicy` | Image pull policy | `Always` | | `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | | `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | diff --git a/site/docs/5_references/1_nifi_cluster/5_node_state.md b/site/docs/5_references/1_nifi_cluster/5_node_state.md index 6b4b8cdb10..9b04fc0195 100644 --- a/site/docs/5_references/1_nifi_cluster/5_node_state.md +++ b/site/docs/5_references/1_nifi_cluster/5_node_state.md @@ -13,6 +13,8 @@ Holds information about nifi state |gracefulActionState|[GracefulActionState](#gracefulactionstate)| holds info about nifi cluster action status.| - | - | |configurationState|[ConfigurationState](#configurationstate)| holds info about the config.| - | - | |initClusterNode|[InitClusterNode](#initclusternode)| contains if this nodes was part of the initial cluster.| - | - | +|podIsReady|bool| True if the pod for this node is up and running. Otherwise false.| - | - | +|creationTime|[v1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time)| The time at which this node was created and added to the cluster| - | - | ## GracefulActionState diff --git a/site/docs/5_references/7_nifi_nodegroup_autoscaler.md b/site/docs/5_references/7_nifi_nodegroup_autoscaler.md new file mode 100644 index 0000000000..7faf3f5445 --- /dev/null +++ b/site/docs/5_references/7_nifi_nodegroup_autoscaler.md @@ -0,0 +1,59 @@ +--- +id: 7_nifi_nodegroup_autoscaler +title: NiFi NodeGroup Autoscaler +sidebar_label: NiFi NodeGroup Autoscaler +--- + +`NifiNodeGroupAutoscaler` is the Schema through which you configure automatic scaling of `NifiCluster` deployments. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiNodeGroupAutoscaler +metadata: + name: nifinodegroupautoscaler-sample +spec: + # contains the reference to the NifiCluster with the one the node group autoscaler is linked. 
+ clusterRef: + name: nificluster-name + namespace: nifikop + # defines the id of the NodeConfig contained in NifiCluster.Spec.NodeConfigGroups + nodeConfigGroupId: default-node-group + # The selector used to identify nodes in NifiCluster.Spec.Nodes this autoscaler will manage + # Use Node.Labels in combination with this selector to clearly define which nodes will be managed by this autoscaler + nodeLabelsSelector: + matchLabels: + nifi_cr: nificluster-name + nifi_node_group: default-node-group + # the strategy used to decide how to add nodes to a nifi cluster + upscaleStrategy: simple + # the strategy used to decide how to remove nodes from an existing cluster + downscaleStrategy: lifo +``` + +## NifiNodeGroupAutoscaler +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects nodegroupautoscalers must create.|No|nil| +|spec|[NifiNodeGroupAutoscalerSpec](#nifinodegroupautoscalerspec)|defines the desired state of NifiNodeGroupAutoscaler.|No|nil| +|status|[NifiNodeGroupAutoscalerStatus](#nifinodegroupautoscalerstatus)|defines the observed state of NifiNodeGroupAutoscaler.|No|nil| + +## NifiNodeGroupAutoscalerSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|clusterRef|[ClusterReference](./2_nifi_user.md#clusterreference)| contains the reference to the NifiCluster containing the node group this autoscaler should manage. |Yes| - | +|nodeConfigGroupId| string | defines the id of the [NodeConfig](./1_nifi_cluster/3_node_config.md) contained in `NifiCluster.Spec.NodeConfigGroups`. |Yes| - | +|nodeLabelsSelector|[LabelSelector](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#LabelSelector)| defines the set of labels used to identify nodes in a `NifiCluster` node config group. 
Use `Node.Labels` in combination with this selector to clearly define which nodes will be managed by this autoscaler. Take care to avoid having mutliple autoscalers managing the same nodes. |Yes| - | +|readOnlyConfig| [ReadOnlyConfig](./1_nifi_cluster/2_read_only_config.md) | defines a readOnlyConfig to apply to each node in this node group. Any settings here will override those set in the configured `nodeConfigGroupId`. |Yes| - | +|nodeConfig| [NodeConfig](./1_nifi_cluster/3_node_config.md) | defines a nodeConfig to apply to each node in this node group. Any settings here will override those set in the configured `nodeConfigGroupId`. |Yes| - | +|upscaleStrategy| string | The strategy NiFiKop will use to scale up the nodes managed by this autoscaler. Must be one of {`simple`}. |Yes| - | +|downscaleStrategy| string | The strategy NiFiKop will use to scale down the nodes managed by this autoscaler. Must be one of {`lifo`}. |Yes| - | +|replicas| int | the initial number of replicas to configure the `HorizontalPodAutoscaler` with. After the initial configuration, this `replicas` configuration will be automatically updated by the Kubernetes `HorizontalPodAutoscaler` controller. |No| 1 | + +## NifiNodeGroupAutoscalerStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|state|string| the state of the nodegroup autoscaler. This is set by the autoscaler. |No| - | +|replicas|int| the current number of replicas running in the node group this autoscaler is managing. This is set by the autoscaler.|No| - | +|selector|string| the [selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) used by the `HorizontalPodAutoscaler` controller to identify the replicas in this node group. 
This is set by the autoscaler.|No| - | \ No newline at end of file diff --git a/site/website/sidebars.json b/site/website/sidebars.json index 3747b504dc..8de713093f 100644 --- a/site/website/sidebars.json +++ b/site/website/sidebars.json @@ -40,6 +40,11 @@ "type": "category", "label": "Authentication", "items": ["3_tasks/2_security/2_authentication/1_oidc"] + }, + { + "type": "category", + "label": "Authorization", + "items": ["3_tasks/2_security/2_authorization/1_authorizer"] } ] }, @@ -64,7 +69,8 @@ "5_references/3_nifi_registry_client", "5_references/4_nifi_parameter_context", "5_references/5_nifi_dataflow", - "5_references/6_nifi_usergroup" + "5_references/6_nifi_usergroup", + "5_references/7_nifi_nodegroup_autoscaler" ], "Contributing": [ "6_contributing/0_contribution_organization", diff --git a/site/website/versioned_docs/version-v0.13.0/1_concepts/1_introduction.md b/site/website/versioned_docs/version-v0.13.0/1_concepts/1_introduction.md new file mode 100644 index 0000000000..d65e316acd --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/1_concepts/1_introduction.md @@ -0,0 +1,34 @@ +--- +id: 1_introduction +title: Introduction +sidebar_label: Introduction +--- + +The Konpyūtāika NiFi operator is a Kubernetes operator to automate provisioning, management, autoscaling and operations of [Apache NiFi](https://nifi.apache.org/) clusters deployed to K8s. + +## Overview + +Apache NiFi is an open-source solution that support powerful and scalable directed graphs of data routing, transformation, and system mediation logic. 
+Some of the high-level capabilities and objectives of Apache NiFi include, and some of the main features of the **NiFiKop** are: + +- **Fine grained** node configuration support +- Graceful rolling upgrade +- graceful NiFi cluster **scaling** +- Encrypted communication using SSL +- the provisioning of secure NiFi clusters +- Advanced Dataflow and user management via CRD + +Some of the roadmap features : + +- Monitoring via **Prometheus** +- Automatic reaction and self healing based on alerts (plugin system, with meaningful default alert plugins) +- graceful NiFi cluster **scaling and rebalancing** + +## Motivation + +There are already some approaches to operating NiFi on Kubernetes, however, we did not find them appropriate for use in a highly dynamic environment, nor capable of meeting our needs. + +- [Helm chart](https://github.com/cetic/helm-nifi) +- [Cloudera Nifi Operator](https://blog.cloudera.com/cloudera-flow-management-goes-cloud-native-with-apache-nifi-on-red-hat-openshift-kubernetes-platform/) + +Finally, our motivation is to build an open source solution and a community which drives the innovation and features of this operator. \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/1_concepts/2_design_principes.md b/site/website/versioned_docs/version-v0.13.0/1_concepts/2_design_principes.md new file mode 100644 index 0000000000..ef9e0f6e02 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/1_concepts/2_design_principes.md @@ -0,0 +1,62 @@ +--- +id: 2_design_principes +title: Design Principes +sidebar_label: Design Principes +--- + +## Pod level management + +NiFi is a stateful application. The first piece of the puzzle is the Node, which is a simple server capable of createing/forming a cluster with other Nodes. Every Node has his own **unique** configuration which differs slightly from all others. 
+ +All NiFi on Kubernetes setup use [StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) to create a NiFi Cluster. Just to quickly recap from the K8s docs: + +>StatefulSet manages the deployment and scaling of a set of Pods, and provide guarantees about their ordering and uniqueness. Like a Deployment, a StatefulSet manages Pods that are based on an identical container spec. Unlike a Deployment, a StatefulSet maintains sticky identities for each of its Pods. These pods are created from the same spec, but are not interchangeable: each has a persistent identifier that is maintained across any rescheduling. + +How does this looks from the perspective of Apache NiFi ? + +With StatefulSet we get : +- unique Node IDs generated during Pod startup +- networking between Nodes with headless services +- unique Persistent Volumes for Nodes + +Using StatefulSet we **lose** the ability to : + +- modify the configuration of unique Nodes +- remove a specific Node from a cluster (StatefulSet always removes the most recently created Node) +- use multiple, different Persistent Volumes for each Node + +The NiFi Operator uses `simple` Pods, ConfigMaps, and PersistentVolumeClaims, instead of StatefulSet (based on the design used by [Banzai Cloud Kafka Operator](https://github.com/banzaicloud/kafka-operator)). +Using these resources allows us to build an Operator which is better suited to NiFi. + +With the NiFi operator we can: + +- modify the configuration of unique Nodes +- remove specific Nodes from clusters +- use multiple Persistent Volumes for each Node + +## Dataflow Lifecycle management + +The [Dataflow Lifecycle management feature](./3_features.md#dataflow-lifecycle-management-via-crd) introduces 3 new CRDs : + +- **NiFiRegistryClient :** Allowing you to declare a [NiFi registry client](https://nifi.apache.org/docs/nifi-registry-docs/html/getting-started.html#connect-nifi-to-the-registry). 
+- **NiFiParameterContext :** Allowing you to create parameter context, with two kinds of parameters, a simple `map[string]string` for non-sensitive parameters and a `list of secrets` which contains sensitive parameters. +- **NiFiDataflow :** Allowing you to declare a Dataflow based on a `NiFiRegistryClient` and optionally a `ParameterContext`, which will be deployed and managed by the operator on the `targeted NiFi cluster`. + +The following diagram shows the interactions between all the components : + +![dataflow lifecycle management schema](/img/1_concepts/2_design_principes/dataflow_lifecycle_management_schema.jpg) + +With each CRD comes a new controller, with a reconcile loop : + +- **NiFiRegistryClient's controller :** + +![NiFi registry client's reconcile loop](/img/1_concepts/2_design_principes/registry_client_reconcile_loop.jpeg) + +- **NiFiParameterContext's controller :** + +![NiFi parameter context's reconcile loop](/img/1_concepts/2_design_principes/parameter_context_reconcile_loop.jpeg) + +- **NiFiDataflow's controller :** + +![NiFi dataflow's reconcile loop](/img/1_concepts/2_design_principes/dataflow_reconcile_loop.jpeg) + diff --git a/site/website/versioned_docs/version-v0.13.0/1_concepts/3_features.md b/site/website/versioned_docs/version-v0.13.0/1_concepts/3_features.md new file mode 100644 index 0000000000..60c7106862 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/1_concepts/3_features.md @@ -0,0 +1,67 @@ +--- +id: 3_features +title: Features +sidebar_label: Features +--- + +To highligt some of the features we needed and were not possible with the operators available, please keep reading + +## Fine Grained Node Config Support + +We needed to be able to react to events in a fine-grained way for each Node - and not in the limited way StatefulSet does (which, for example, removes the most recently created Nodes). 
Some of the available solutions try to overcome these deficits by placing scripts inside the container to generate configs at runtime (a good example is our [Cassandra Operator](https://github.com/Orange-OpenSource/casskop)), whereas the Orange NiFi operator's configurations are deterministically placed in specific Configmaps. + +## Graceful NiFi Cluster Scaling + +Apache NiFi is a good candidate to create an operator, because everything is made to orchestrate it through REST Api calls. With this comes automation of actions such as scaling, following all required steps : https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#decommission-nodes. + +## Graceful Rolling Upgrade + +Operator supports graceful rolling upgrade. It means the operator will check if the cluster is healthy. + +## Dynamic Configuration Support + +NiFi operates with two type of configs: + +- Read-only +- PerNode + +Read only config requires node restart to update all the others may be updated dynamically. +Operator CRD distinguishes these fields, and proceed with the right action. It can be a rolling upgrade, or +a dynamic reconfiguration. + +## Dataflow lifecycle management via CRD + +In a cloud native approach, we are looking for important management features, which we have applied to NiFi Dataflow : + +- **Automated deployment :** Based on the NiFi registry, you can describe your `NiFiDataflow` resource that will be deployed and run on the targeted NiFi cluster. +- **Portability :** On kubernetes everything is a yaml file, so with NiFiKop we give you the ability to describe your clusters but also the `registry clients`, `parameter contexts` and `dataflows` of your NiFi application, so that you can redeploy the same thing in a different namespace or cluster. 
+- **State management :** With NiFiKop resources, you can describe what you want, and the operator deals with the NiFi Rest API to make sure the resource stays in sync (even if someone manually makes changes directly on NiFi cluster). +- **Configurations :** Based on the `Parameter Contexts`, NiFiKop allows you to associate to your `Dataflow` (= your applications) with a different configuration depending on the environment ! + +## Users and access policies management + +Without the management of users and access policies associated, it was not possible to have a fully automated NiFi cluster setup due to : + +- **Node scaling :** when a new node joins the cluster it needs to have some roles like `proxy user request`, `view data` etc., by managing users and access policies we can easily create a user for this node with the right accesses. +- **Operator admin rights :** For the operator to manage efficiently the cluster it needs a lot of rights as `deploying process groups`, `empty the queues` etc., these rights are not available by default when you set a user as [InitialAdmin](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#initial-admin-identity). Once again by giving the ability to define users and access policies we go through this. +- **User's access :** as seen just below we need to define the operator as `InitialAdmin`, in this situation there is no more users that can access to the web UI to manually give access to other users. That's why we extend the `InitialAdmin` concept into the operator, giving the ability to define a list of users as admins. + +In addition to these requirements to have a fully automated and managed cluster, we introduced some useful features : + +- **User management :** using `NifiUser` resource, you are able to create (or bind an existing) user in NiFi cluster and apply some access policies that will be managed and continuously synced by the operator. 
+- **Group management :** using `NifiUserGroup` resource, you can create groups in NiFi cluster and apply access policies and a list of `NifiUser` that will be managed and continuously synced by the operator. +- **Default group :** As the definition of `NifiUser` and `NifiUserGroup` resources could be heavy for some simple use cases, we also decided to define two default groups that you can feed with a list of users that will be created and managed by the operator (no kubernetes resources to create) : + - **Admins :** a group giving access to everything on the NiFi Cluster, + - **Readers :** a group giving access as viewer on the NiFi Cluster. + +By introducing this feature we are giving you the ability to fully automate your deployment, from the NiFi Cluster to your managed NiFi Dataflow. + +## Automatic horizontal NiFi cluster scaling via CRD + +NiFiKop supports automatically horizontally scaling `NifiCluster` node groups with a `NifiNodeGroupAutoscaler` custom resource. + +- **Kubernetes native :** The `NifiNodeGroupAutoscaler` controller implements the [Kubernetes scale subresource](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#scale-subresource) and creates a Kubernetes [`HorizontalPodAutoscaler`](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to automatically scale the pods that NiFiKop creates for `NifiCluster` deployments. +- **Metrics-driven autoscaling :** The `HorizontalPodAutoscaler` can be driven by pod usage metrics (e.g. RAM, CPU) or through NiFi application metrics scraped by Prometheus. +- **Flexible NifiCluster node group autoscaling :** The `NifiNodeGroupAutoscaler` scales specific node groups in your `NifiCluster` and you may have as many autoscalers as you like per `NifiCluster` deployment. For example, a `NifiNodeGroupAutoscaler` may manage high-memory or high-cpu sets of nodes for volume burst scenarios or it may manage every node in your cluster. 
+ +Through this set of features, you may elect to have NiFiKop configure automatic horizontal autoscaling for any subset of nodes in your `NifiCluster` deployment. \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/1_concepts/4_roadmap.md b/site/website/versioned_docs/version-v0.13.0/1_concepts/4_roadmap.md new file mode 100644 index 0000000000..fe28367ea3 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/1_concepts/4_roadmap.md @@ -0,0 +1,95 @@ +--- +id: 4_roadmap +title: Roadmap +sidebar_label: Roadmap +--- + +## Available + +### NiFi cluster installation + +| | | +| --------------------- | --------- | +| Status | Done | +| Priority | High | +| Targeted Start date | Jan 2020 | + +### Graceful NiFi Cluster Scaling + +| | | +| --------------------- | --------- | +| Status | Done | +| Priority | High | +| Targeted Start date | Jan 2020 | + +Apache NiFi is a good candidate to create an operator, because everything is made to orchestrate it through REST Api calls. With this comes automation of actions such as scaling, following all required steps : https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#decommission-nodes. + +### Communication via SSL + +| | | +| --------------------- | -------- | +| Status | Done | +| Priority | High | +| Targeted Start date | May 2020 | + + +The operator fully automates NiFi's SSL support. +The operator can provision the required secrets and certificates for you, or you can provide your own. + +### Dataflow lifecycle management via CRD + +| | | +| --------------------- | --------- | +| Status | Done | +| Priority | High | +| Targeted Start date | Aug 2020 | + +### Users & access policies management + +| | | +| --------------------- | ----- | +| Status | Done| +| Priority | High | +| Targeted Start date | November 2020 | + +The operator fully automates NiFi's user and access policies management. 
+ +## Backlog + +### Monitoring via Prometheus + +| | | +| --------------------- | -------- | +| Status | To Do | +| Priority | High | +| Targeted Start date | Oct 2020 | + +The NiFi operator exposes NiFi JMX metrics to Prometheus. + +### Reacting on Alerts + +| | | +| --------------------- | ----- | +| Status | To Do | +| Priority | Low | +| Targeted Start date | - | + +The NiFi Operator acts as a **Prometheus Alert Manager**. It receives alerts defined in Prometheus, and creates actions based on Prometheus alert annotations. + +Currently, there are three actions expected : +- upscale cluster (add a new Node) +- downscale cluster (remove a Node) +- add additional disk to a Node + +### Seamless Istio mesh support + +| | | +| --------------------- | ----- | +| Status | To Do | +| Priority | Low | +| Targeted Start date | - | + +- Operator allows to use ClusterIP services instead of Headless, which still works better in case of Service meshes. +- To avoid too early nifi initialization, which might lead to unready sidecar container. The operator will use a small script to +mitigate this behaviour. All NiFi image can be used the only one requirement is an available **wget** command. +- To access a NiFi cluster which runs inside the mesh. Operator will supports creating Istio ingress gateways. \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/2_setup/1_getting_started.md b/site/website/versioned_docs/version-v0.13.0/2_setup/1_getting_started.md new file mode 100644 index 0000000000..59c6051ae7 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/2_setup/1_getting_started.md @@ -0,0 +1,152 @@ +--- +id: 1_getting_started +title: Getting Started +sidebar_label: Getting Started +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +The operator installs the 1.12.1 version of Apache NiFi, can run on Minikube v0.33.1+ and **Kubernetes 1.21.0+**, and require **Helm 3**. 
+ +:::info +The operator supports NiFi 1.11.0+ +::: + +As a pre-requisite it needs a Kubernetes cluster. Also, NiFi requires Zookeeper so you need to first have a Zookeeper cluster if you don't already have one. + +> We believe in the `separation of concerns` principle, thus the NiFi operator does not install nor manage Zookeeper. + +## Prerequisites + +### Install Zookeeper + +To install Zookeeper we recommend using the [Bitnami's Zookeeper chart](https://github.com/bitnami/charts/tree/master/bitnami/zookeeper). + +```bash +helm repo add bitnami https://charts.bitnami.com/bitnami +``` + +```bash +# You have to create the namespace before executing following command +helm install zookeeper bitnami/zookeeper \ + --set resources.requests.memory=256Mi \ + --set resources.requests.cpu=250m \ + --set resources.limits.memory=256Mi \ + --set resources.limits.cpu=250m \ + --set global.storageClass=standard \ + --set networkPolicy.enabled=true \ + --set replicaCount=3 +``` + +:::warning +Replace the `storageClass` parameter value with your own. +::: + +### Install cert-manager + +The NiFiKop operator uses `cert-manager` for issuing certificates to users and and nodes, so you'll need to have it setup in case you want to deploy a secured cluster with authentication enabled. The minimum supported cert-manager version is v1.0. 
+ + + + +```bash +# Install the CustomResourceDefinitions and cert-manager itself +kubectl apply -f \ + https://github.com/jetstack/cert-manager/releases/download/v1.7.2/cert-manager.yaml +``` + + + + +```bash +# Install CustomResourceDefinitions first +kubectl apply --validate=false -f \ + https://github.com/jetstack/cert-manager/releases/download/v1.7.2/cert-manager.crds.yaml + +# Add the jetstack helm repo +helm repo add jetstack https://charts.jetstack.io +helm repo update + +# You have to create the namespace before executing following command +helm install cert-manager \ + --namespace cert-manager \ + --version v1.7.2 jetstack/cert-manager +``` + + + + +## Installation + +## Installing with Helm + +You can deploy the operator using a Helm chart [Helm chart](https://github.com/konpyutaika/nifikop/tree/master/helm): + +> To install an other version of the operator use `helm install --name=nifikop --namespace=nifi --set operator.image.tag=x.y.z konpyutaika-incubator/nifikop` + +In the case where you don't want to deploy the crds using helm (`--skip-crds`), you have to deploy manually the crds : + +```bash +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiusers.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiusergroups.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifidataflows.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiparametercontexts.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiregistryclients.yaml +``` + +Now deploy the helm chart : + +```bash 
+# You have to create the namespace before executing following command +helm install nifikop \ + oci://ghcr.io/konpyutaika/helm-charts/nifikop \ + --namespace=nifi \ + --version 0.13.0 \ + --set image.tag=v0.13.0-release \ + --set resources.requests.memory=256Mi \ + --set resources.requests.cpu=250m \ + --set resources.limits.memory=256Mi \ + --set resources.limits.cpu=250m \ + --set namespaces={"nifi"} +``` + +:::note +Add the following parameter if you are using this instance to only deploy unsecured clusters : `--set certManager.enabled=false` +::: + +## Create custom storage class + +We recommend to use a **custom StorageClass** to leverage the volume binding mode `WaitForFirstConsumer` + +```bash +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: exampleStorageclass +parameters: + type: pd-standard +provisioner: kubernetes.io/gce-pd +reclaimPolicy: Delete +volumeBindingMode: WaitForFirstConsumer +``` + +:::tip +Remember to set your NiFiCluster CR properly to use the newly created StorageClass. +::: + +## Deploy NiFi cluster + +And after you can deploy a simple NiFi cluster. + +```bash +# Add your zookeeper svc name to the configuration +kubectl create -n nifi -f config/samples/simplenificluster.yaml +``` diff --git a/site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/1_gke.md b/site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/1_gke.md new file mode 100644 index 0000000000..c798f74434 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/1_gke.md @@ -0,0 +1,42 @@ +--- +id: 1_gke +title: Google Kubernetes Engine +sidebar_label: Google Kubernetes Engine +--- + +Follow these instructions to prepare a GKE cluster for NiFiKop + +1. Setup environment variables. + +```sh +export GCP_PROJECT= +export GCP_ZONE= +export CLUSTER_NAME= +``` + +2. Create a new cluster. 
 + +```sh +gcloud container clusters create $CLUSTER_NAME \ + --cluster-version latest \ + --machine-type=n1-standard-1 \ + --num-nodes 4 \ + --zone $GCP_ZONE \ + --project $GCP_PROJECT +``` + +3. Retrieve your credentials for `kubectl`. + +```sh +gcloud container clusters get-credentials $CLUSTER_NAME \ + --zone $GCP_ZONE \ + --project $GCP_PROJECT +``` + +4. Grant cluster administrator (admin) permissions to the current user. To create the necessary RBAC rules for NiFiKop, the current user requires admin permissions. + +```sh +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole=cluster-admin \ + --user=$(gcloud config get-value core/account) +``` diff --git a/site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/2_k3d.md b/site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/2_k3d.md new file mode 100644 index 0000000000..d268a05e94 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/2_setup/2_platform_setup/2_k3d.md @@ -0,0 +1,26 @@ +--- +id: 2_k3d +title: K3D +sidebar_label: K3D +--- + +Follow these instructions to prepare k3d for NiFiKop installation with sufficient resources to run NiFiKop and some basic applications. + +## Prerequisites + +- Administrative privileges are required to run k3d. + +## Installation steps + +1. Install the latest version of [k3d](https://k3d.io/v5.3.0/#installation), version 5.3.0 or later. +2. Create your Kubernetes cluster. This example uses Kubernetes version 1.21.10. You can change the version to any Kubernetes version supported by NiFiKop by altering the --kubernetes-version value: + + ```sh + k3d cluster create --image rancher/k3s:v1.21.10-k3s1 --wait + ``` + +3. 
Expose your NiFi cluster: + + ```sh + k3d cluster edit k3s-default --port-add ":@loadbalancer" + ``` \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/2_setup/3_install/1_customizable_install_with_helm.md b/site/website/versioned_docs/version-v0.13.0/2_setup/3_install/1_customizable_install_with_helm.md new file mode 100644 index 0000000000..41cf9a4813 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/2_setup/3_install/1_customizable_install_with_helm.md @@ -0,0 +1,197 @@ +--- +id: 1_customizable_install_with_helm +title: Customizable install with Helm +sidebar_label: Customizable install with Helm +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +## Prerequisites + +- Perform any necessary [plateform-specific setup](../2_platform_setup/1_gke.md) +- [Install a Helm client](https://github.com/helm/helm#install) with a version higher than 3 + +## Introduction + +This Helm chart install NiFiKop the Nifi Kubernetes operator to create/configure/manage NiFi +clusters in a Kubernetes Namespace. + +It will use Custom Ressources Definition CRDs: + +- `nificlusters.nifi.konpyutaika.com`, +- `nifiusers.nifi.konpyutaika.com`, +- `nifiusergroups.nifi.konpyutaika.com`, +- `nifiregistryclients.nifi.konpyutaika.com`, +- `nifiparametercontexts.nifi.konpyutaika.com`, +- `nifidataflows.nifi.konpyutaika.com`, + +### Configuration + +The following tables lists the configurable parameters of the NiFi Operator Helm chart and their default values. 
+| Parameter | Description | Default | +|----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------| +| `image.repository` | Image | `ghcr.io/konpyutaika/docker-images/nifikop` | +| `image.tag` | Image tag | `v0.13.0-release` | +| `image.pullPolicy` | Image pull policy | `Always` | +| `image.imagePullSecrets.enabled` | Enable tue use of secret for docker image | `false` | +| `image.imagePullSecrets.name` | Name of the secret to connect to docker registry | - | +| `certManager.enabled` | Enable cert-manager integration | `true` | +| `rbacEnable` | If true, create & use RBAC resources | `true` | +| `resources` | Pod resource requests & limits | `{}` | +| `metrics.enabled` | deploy service for metrics | `false` | +| `metrics.port` | Set port for operator metrics | `8081` | +| `logLevel` | Log level to output | `Info` | +| `logEncoding` | Log encoding to use. Either `json` or `console` | `json` | +| `certManager.clusterScoped` | If true setup cluster scoped resources | `false` | +| `namespaces` | List of namespaces where Operator watches for custom resources. Make sure the operator ServiceAccount is granted `get` permissions on this `Node` resource when using limited RBACs. | `""` i.e. all namespaces | +| `nodeSelector` | Node selector configuration for operator pod | `{}` | +| `affinity` | Node affinity configuration for operator pod | `{}` | +| `tolerations` | Toleration configuration for operator pod | `{}` | +| `serviceAccount.create` | Whether the SA creation is delegated to the chart or not | `true` | +| `serviceAccount.name` | Name of the SA used for NiFiKop deployment | release name | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +$ helm install nifikop \ + konpyutaika/nifikop \ + -f values.yaml +``` + +### Installing the Chart + +:::important Skip CRDs +In the case where you don't want to deploy the crds using helm (`--skip-crds`) you need to deploy manually the crds beforehand: + +```bash +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiusers.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiusergroups.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifidataflows.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiparametercontexts.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiregistryclients.yaml +``` + +::: + + + + +```bash +helm install nifikop konpyutaika/nifikop \ + --dry-run \ + --set logLevel=Debug \ + --set namespaces={"nifikop"} +``` + + + + +```bash +helm install konpyutaika/nifikop +``` + + + + + +```bash +helm install nifikop konpyutaika/nifikop --set namespaces={"nifikop"} +``` + + + + +> the `--replace` flag allow you to reuses a charts release name + +### Listing deployed charts + +```bash +helm list +``` + +### Get Status for the helm deployment + +```bash +helm status nifikop +``` + +## Uninstaling the Charts + +If you want to delete the operator from your Kubernetes cluster, the operator deployment +should be deleted. 
+ +```bash +helm del nifikop +``` + +The command removes all the Kubernetes components associated with the chart and deletes the helm release. + +:::tip +The CRD created by the chart are not removed by default and should be manually cleaned up (if required) +::: + +Manually delete the CRD: + +```bash +kubectl delete crd nificlusters.nifi.konpyutaika.com +kubectl delete crd nifiusers.nifi.konpyutaika.com +kubectl delete crd nifiusergroups.nifi.konpyutaika.com +kubectl delete crd nifiregistryclients.nifi.konpyutaika.com +kubectl delete crd nifiparametercontexts.nifi.konpyutaika.com +kubectl delete crd nifidataflows.nifi.konpyutaika.com +``` + +:::warning +If you delete the CRD then +It will delete **ALL** Clusters that has been created using this CRD!!! +Please never delete a CRD without very good care +::: + +Helm always keeps records of what releases happened. Need to see the deleted releases ? + +```bash +helm list --deleted +``` + +Need to see all of the releases (deleted and currently deployed, as well as releases that +failed) ? + +```bash +helm list --all +``` + +Because Helm keeps records of deleted releases, a release name cannot be re-used. (If you really need to re-use a +release name, you can use the `--replace` flag, but it will simply re-use the existing release and replace its +resources.) + +Note that because releases are preserved in this way, you can rollback a deleted resource, and have it re-activate. + +To purge a release + +```bash +helm delete --purge nifikop +``` + +## Troubleshooting + +### Install of the CRD + +By default, the chart will install the CRDs, but this installation is global for the whole +cluster, and you may want to not modify the already deployed CRDs. 
 + +In this case there is a parameter to tell the chart not to install the CRDs : + +``` +$ helm install --name nifikop ./helm/nifikop --set namespaces={"nifikop"} --skip-crds +``` diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md new file mode 100644 index 0000000000..0260901b17 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/1_nodes_configuration.md @@ -0,0 +1,9 @@ +--- +id: 1_nodes_configuration +title: Nodes configuration +sidebar_label: Nodes configuration +--- + +:::warning +WIP +::: \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md new file mode 100644 index 0000000000..1c9f4cda61 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/2_cluster_scaling.md @@ -0,0 +1,237 @@ +--- +id: 2_cluster_scaling +title: Cluster Scaling +sidebar_label: Cluster Scaling +--- + +This task shows you how to perform a graceful cluster scale up and scale down. + +## Before you begin + +- Setup NiFiKop by following the instructions in the [Installation guide](../../2_setup/1_getting_started.md). +- Deploy the [Simple NiFi](../../2_setup/1_getting_started.md#easy-way-installing-with-helm) sample cluster. +- Review the [Node](../../5_references/1_nifi_cluster/4_node.md) references doc. + +## About this task + +The [Simple NiFi](../../2_setup/1_getting_started.md#easy-way-installing-with-helm) example consists of a three-node NiFi cluster. +A node decommission must follow a strict procedure, described in the [NiFi documentation](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#decommission-nodes) : + +1. Disconnect the node +2. Once disconnect completes, offload the node. +3. 
Once offload completes, delete the node. +4. Once the delete request has finished, stop/remove the NiFi service on the host. + + +For the moment, we have implemented it as follows in the operator : + + 1. Disconnect the node + 2. Once disconnect completes, offload the node. + 3. Once offload completes, delete the pod. + 4. Once the pod deletion completes, delete the node. + 5. Once the delete request has finished, remove the node from the NifiCluster status. + +In addition, we have a regular check that ensures that all nodes have been removed. + +In this task, you will first perform a scale up by adding a new node. Then, you will remove another node than the one created, and observe the decommission's steps. + +## Scale up : Add a new node + +For this task, we will simply add a node with the same configuration as the other ones; if you want to know more about how to add a node with another configuration, have a look at the [Node configuration](./1_nodes_configuration.md) documentation page. + +1. Add and run a dataflow as in the example : + +![Scaling dataflow](/img/3_tasks/1_nifi_cluster/2_cluster_scaling/scaling_dataflow.png) + +2. 
Add a new node to the list of `NifiCluster.Spec.Nodes` field, by following the [Node object definition](../../5_references/1_nifi_cluster/4_node.md) documentation: + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +metadata: + name: simplenifi +spec: + service: + headlessEnabled: true + zkAddress: "zookeepercluster-client.zookeeper:2181" + zkPath: "/simplenifi" + clusterImage: "apache/nifi:1.12.1" + oneNifiNodePerNode: false + nodeConfigGroups: + default_group: + isNode: true + storageConfigs: + - mountPath: "/opt/nifi/nifi-current/logs" + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: "standard" + resources: + requests: + storage: 10Gi + serviceAccountName: "default" + resourcesRequirements: + limits: + cpu: "2" + memory: 3Gi + requests: + cpu: "1" + memory: 1Gi + nodes: + - id: 0 + nodeConfigGroup: "default_group" + - id: 1 + nodeConfigGroup: "default_group" + - id: 2 + nodeConfigGroup: "default_group" +# >>>> START: The new node + - id: 25 + nodeConfigGroup: "default_group" +# <<<< END + propagateLabels: true + nifiClusterTaskSpec: + retryDurationMinutes: 10 + listenersConfig: + internalListeners: + - type: "http" + name: "http" + containerPort: 8080 + - type: "cluster" + name: "cluster" + containerPort: 6007 + - type: "s2s" + name: "s2s" + containerPort: 10000 +``` + +:::important +**Note :** The `Node.Id` field must be unique in the `NifiCluster.Spec.Nodes` list. +::: + +3. Apply the new `NifiCluster` configuration : + +```sh +kubectl -n nifi apply -f config/samples/simplenificluster.yaml +``` + +4. 
You should now have the following resources into kubernetes : + +```console +kubectl get pods,configmap,pvc -l nodeId=25 +NAME READY STATUS RESTARTS AGE +pod/simplenifi-25-nodem5jh4 1/1 Running 0 11m + +NAME DATA AGE +configmap/simplenifi-config-25 7 11m + +NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE +persistentvolumeclaim/simplenifi-25-storagehwn24 Bound pvc-7da86076-728e-11ea-846d-42010a8400f2 10Gi RWO standard 11m +``` + +And if you go on the NiFi UI, in the cluster administration page : + +![Scale up, cluster list](/img/3_tasks/1_nifi_cluster/2_cluster_scaling/scaleup_cluster_list.png) + +5. You now have data on the new node : + +![Scale up, cluster distribution](/img/3_tasks/1_nifi_cluster/2_cluster_scaling/scaleup_distribution.png) + +## Scaledown : Gracefully remove node + +For this task, we will simply remove a node and look at that the decommission's steps. + +1. Remove the node from the list of `NifiCluster.Spec.Nodes` field : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +metadata: + name: simplenifi +spec: + headlessServiceEnabled: true + zkAddresse: "zookeepercluster-client.zookeeper:2181" + zkPath: "/simplenifi" + clusterImage: "apache/nifi:1.11.3" + oneNifiNodePerNode: false + nodeConfigGroups: + default_group: + isNode: true + storageConfigs: + - mountPath: "/opt/nifi/nifi-current/logs" + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: "standard" + resources: + requests: + storage: 10Gi + serviceAccountName: "default" + resourcesRequirements: + limits: + cpu: "2" + memory: 3Gi + requests: + cpu: "1" + memory: 1Gi + nodes: + - id: 0 + nodeConfigGroup: "default_group" + - id: 1 + nodeConfigGroup: "default_group" +# >>>> START: node removed +# - id: 2 +# nodeConfigGroup: "default_group" +# <<<< END + - id: 25 + nodeConfigGroup: "default_group" + propagateLabels: true + nifiClusterTaskSpec: + retryDurationMinutes: 10 + listenersConfig: + internalListeners: + - type: "http" + name: 
"http" + containerPort: 8080 + - type: "cluster" + name: "cluster" + containerPort: 6007 + - type: "s2s" + name: "s2s" + containerPort: 10000 +``` + +2. Apply the new `NifiCluster` configuration : + +```sh +kubectl -n nifi apply -f config/samples/simplenificluster.yaml +``` + +3. You can follow the node's action step status in the `NifiCluster.Status` description : + +```console +kubectl describe nificluster simplenifi + +... +Status: + Nodes State: + ... + 2: + Configuration State: ConfigInSync + Graceful Action State: + Action State: GracefulDownscaleRequired + Error Message: + ... +... +``` + +:::tip +The list of decommision's step and their corresponding value for the `Nifi Cluster.Status.Node State.Graceful ActionState.ActionStep` field is described into the [Node State page](../../5_references/1_nifi_cluster/5_node_state.md#actionstep) +::: + +4. Once the scaledown successfully performed, you should have the data offloaded on the other nodes, and the node state removed from the `NifiCluster.Status.NodesState` list : + +:::warning +Keep in mind that the [`NifiCluster.Spec.nifiClusterTaskSpec.retryDurationMinutes`](../../5_references/1_nifi_cluster/1_nifi_cluster.md#nificlustertaskspec) should be long enough to perform the whole procedure, or you will have some rollback and retry loop. 
+::: \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/3_external_dns.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/3_external_dns.md new file mode 100644 index 0000000000..27c33dc5ec --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/3_external_dns.md @@ -0,0 +1,9 @@ +--- +id: 3_external_dns +title: External DNS +sidebar_label: External DNS +--- + +:::warning +WIP +::: \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/4_external_cluster.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/4_external_cluster.md new file mode 100644 index 0000000000..00c9344864 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/1_nifi_cluster/4_external_cluster.md @@ -0,0 +1,93 @@ +--- +id: 4_external_cluster +title: External cluster +sidebar_label: External cluster +--- + +This task shows you how to configure an external cluster. + +## Common configuration + +The operator allows you to manage the Dataflow lifecycle for internal (i.e cluster managed by the operator) and external NiFi cluster. +A NiFi cluster is considered as external as soon as the `NifiCluster` resource used as reference in other NiFi resource explicitly detailed the way to comunicate with the cluster. + +This feature allows you : + +- To automate your Dataflow CI/CD using yaml +- To manage the same way your Dataflow management wherever your cluster is, on bare metal, VMs, k8s, on-premise or on cloud. + +To deploy different resources (`NifiRegistryClient`, `NifiUser`, `NifiUserGroup`, `NifiParameterContext`, `NifiDataflow`) you simply have to declare a `NifiCluster` resource explaining how to discuss with the external cluster, and refer to this resource as usual using the `Spec.ClusterRef` field. 
+ +To declare an external cluster you have to follow this kind of configuration : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +metadata: + name: externalcluster +spec: + # rootProcessGroupId contains the uuid of the root process group for this cluster. + rootProcessGroupId: 'd37bee03-017a-1000-cff7-4eaaa82266b7' + # nodeURITemplate used to dynamically compute node uri. + nodeURITemplate: 'nifi0%d.integ.mapreduce.m0.p.fti.net:9090' + # all nodes require a unique id + nodes: + - id: 1 + - id: 2 + - id: 3 + # type defines if the cluster is internal (i.e managed by the operator) or external. + # :Enum={"external","internal"} + type: 'external' + # clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. + # Enum={"tls","basic"} + clientType: 'basic' + # secretRef references the secret containing the information required to authenticate to the cluster. + secretRef: + name: nifikop-credentials + namespace: nifikop-nifi +``` + +- The `Spec.RootProcessGroupId` field is required to give the operator the ability to manage root level policy and default deployment and policy. +- The `Spec.NodeURITemplate` field defines the hostname template of your NiFi cluster nodes, the operator will use this information and the list of ids specified in the `Spec.Nodes` field to generate the hostname of the nodes (in the configuration above you will have : `nifi01.integ.mapreduce.m0.p.fti.net:9090`, `nifi02.integ.mapreduce.m0.p.fti.net:9090`, `nifi03.integ.mapreduce.m0.p.fti.net:9090`). +- The `Spec.Type` field defines the type of cluster that this resource is referring to, by default it is `internal`, in our case here we just want to use this resource to reference an existing NiFi cluster, so we set this field to `external`. +- The `Spec.ClientType` field defines how we want to authenticate to the NiFi cluster API, for now we are supporting two modes : + - `tls` : using client TLS certificate. 
+ - `basic` : using a username and a password to get an access token. +- The `Spec.SecretRef` defines a reference to a secret which contains the sensitive values that will be used by the operator to authenticate to the NiFi cluster API (ie in basic mode it will contain the password and username). + +:::warning +The id of node only support `int32` as type, so if the hostname of your nodes doesn't match with this, you can't use this feature. +::: + +## Secret configuration for Basic authentication + +When you are using the basic authentication, you have to pass some informations into the secret that is referenced into the `NifiCluster` resource: + +- `username` : the username associated to the user that will be used by the operator to request the REST API. +- `password` : the password associated to the user that will be used by the operator to request the REST API. +- `ca.crt (optional)`: the certificate authority to trust the server certificate if needed + +The following command shows how you can create this secret : + +```console +kubectl create secret generic nifikop-credentials \ + --from-file=username=./secrets/username\ + --from-file=password=./secrets/password\ + --from-file=ca.crt=./secrets/ca.crt\ + -n nifikop-nifi +``` + +:::info +When you use the basic authentication, the operator will create a secret `-basic-secret` containing for each node an access token that will be maintained by the operator. +::: + +## Secret configuration for TLS authentication + +When you are using the tls authentication, you have to pass some information into the secret that is referenced into the `NifiCluster` resource: + +- `tls.key` : The user private key. +- `tls.crt` : The user certificate. +- `password` : the password associated to the user that will be used by the operator to request the REST API. 
+- `ca.crt`: The CA certificate +- `truststore.jks`: +- `keystore.jks`: diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/1_ssl.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/1_ssl.md new file mode 100644 index 0000000000..0dc262f377 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/1_ssl.md @@ -0,0 +1,159 @@ +--- +id: 1_ssl +title: Securing NiFi with SSL +sidebar_label: SSL +--- + +The `NiFi operator` makes securing your NiFi cluster with SSL. You may provide your own certificates, or instruct the operator to create them for from your cluster configuration. + +Below this is an example configuration required to secure your cluster with SSL : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +... +spec: + ... + managedAdminUsers: + - identity : "alexandre.guitton@orange.com" + name: "aguitton" + ... + readOnlyConfig: + # NifiProperties configuration that will be applied to the node. + nifiProperties: + webProxyHosts: + - nifistandard2.trycatchlearn.fr:8443 + ... + ... + listenersConfig: + internalListeners: + - type: "https" + name: "https" + containerPort: 8443 + - type: "cluster" + name: "cluster" + containerPort: 6007 + - type: "s2s" + name: "s2s" + containerPort: 10000 + sslSecrets: + tlsSecretName: "test-nifikop" + create: true +``` + +- `managedAdminUsers` : list of users account which will be configured as admin into NiFi cluster, please check [](../4_nifi_user_group#managed-groups-for-simple-setup) for more information. +- `readOnlyConfig.nifiProperties.webProxyHosts` : A list of allowed HTTP Host header values to consider when NiFi is running securely and will be receiving requests to a different host[:port] than it is bound to. 
[web-properties](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties) + +If `listenersConfig.sslSecrets.create` is set to `false`, the operator will look for the secret at `listenersConfig.sslSecrets.tlsSecretName` and expect these values : + +| key | value | +|-----|-------| +| caCert | The CA certificate | +| caKey | The CA private key | +| clientCert | A client certificate (this will be used by operator for NiFI operations) | +| clientKey | The private key for clientCert | + +## Using an existing Issuer + +As described in the [Reference section](../../5_references/1_nifi_cluster/6_listeners_config.md#sslsecrets), instead of using a self-signed certificate as CA, you can use an existing one. +In order to do so, you only have to refer it into your `Spec.ListenerConfig.SslSecrets.IssuerRef` field. + +### Example : Let's encrypt + +Let's say you have an existing DNS server, with [external dns](https://github.com/kubernetes-sigs/external-dns) deployed into your cluster's namespace. +You can easily use Let's encrypt as authority for your certificate. + +To do this, you have to : + +1. Create an issuer : + +```yaml +apiVersion: cert-manager.io/v1alpha2 +kind: Issuer +metadata: + name: letsencrypt-staging +spec: + acme: + # You must replace this email address with your own. + # Let's Encrypt will use this to contact you about expiring + # certificates, and issues related to your account. + email: + server: https://acme-staging-v02.api.letsencrypt.org/directory + privateKeySecretRef: + # Secret resource used to store the account's private key. + name: example-issuer-account-key + # Add a single challenge solver, HTTP01 using nginx + solvers: + - http01: + ingress: + ingressTemplate: + metadata: + annotations: + "external-dns.alpha.kubernetes.io/ttl": "5" +``` + +2. Setup External dns and correctly create your issuer into your cluster configuration : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +... +spec: + ... 
+ clusterSecure: true + siteToSiteSecure: true + ... + listenersConfig: + clusterDomain: + useExternalDNS: true + ... + sslSecrets: + tlsSecretName: "test-nifikop" + create: true + issuerRef: + name: letsencrypt-staging + kind: Issuer +``` + +## Create SSL credentials + +You may use `NifiUser` resource to create new certificates for your applications, allowing them to query your Nifi cluster. + +To create a new client you will need to generate new certificates sign by the CA. The operator can automate this for you using the `NifiUser` CRD : + +```console +cat << EOF | kubectl apply -n nifi -f - +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiUser +metadata: + name: example-client + namespace: nifi +spec: + clusterRef: + name: nifi + secretName: example-client-secret +EOF +``` + +This will create a user and store its credentials in the secret `example-client-secret`. The secret contains these fields : + +| key | value | +|-----|-------| +| ca.crt | The CA certificate | +| tls.crt | The user certificate | +| tls.key | The user private key | + +You can then mount these secret to your pod. Alternatively, you can write them to your local machine by running: + +```console +kubectl get secret example-client-secret -o jsonpath="{['data']['ca\.crt']}" | base64 -d > ca.crt +kubectl get secret example-client-secret -o jsonpath="{['data']['tls\.crt']}" | base64 -d > tls.crt +kubectl get secret example-client-secret -o jsonpath="{['data']['tls\.key']}" | base64 -d > tls.key +``` + +The operator can also include a Java keystore format (JKS) with your user secret if you'd like. 
Add `includeJKS`: `true` to the `spec` as shown above, and then the user-secret will gain these additional fields : + +| key | value | +|-----|-------| +| tls.jks | The java keystore containing both the user keys and the CA (use this for your keystore AND truststore) | +| pass.txt | The password to decrypt the JKS (this will be randomly generated) | \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authentication/1_oidc.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authentication/1_oidc.md new file mode 100644 index 0000000000..06cf173abe --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authentication/1_oidc.md @@ -0,0 +1,42 @@ +--- +id: 1_oidc +title: OpenId Connect +sidebar_label: OpenId Connect +--- + +To enable authentication via OpenId Connect, as described in the [NiFi Administration guide](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html), some configuration is required in `nifi.properties`. + +In addition, to ensure multiple identity provider support, we recommend adding the following configuration to your `nifi.properties` : + +```sh +nifi.security.identity.mapping.pattern.dn=CN=([^,]*)(?:, (?:O|OU)=.*)? +nifi.security.identity.mapping.value.dn=$1 +nifi.security.identity.mapping.transform.dn=NONE +``` + +To perform this with `NiFiKop` you just have to configure the `Spec.NifiProperties.OverrideConfigs` field with your OIDC configuration, for example : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +... +spec: + ... + readOnlyConfig: + # NifiProperties configuration that will be applied to the node. + nifiProperties: + webProxyHosts: + - nifistandard2.trycatchlearn.fr:8443 + # Additional nifi.properties configuration that will override the one produced based + # on template and configurations. 
+ overrideConfigs: | + nifi.security.user.oidc.discovery.url= + nifi.security.user.oidc.client.id= + nifi.security.user.oidc.client.secret= + nifi.security.identity.mapping.pattern.dn=CN=([^,]*)(?:, (?:O|OU)=.*)? + nifi.security.identity.mapping.value.dn=$1 + nifi.security.identity.mapping.transform.dn=NONE + ... + ... +... +``` \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authorization/1_custom_authorizer.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authorization/1_custom_authorizer.md new file mode 100644 index 0000000000..515a8dd806 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/2_security/2_authorization/1_custom_authorizer.md @@ -0,0 +1,83 @@ +--- +id: 1_authorizer +title: Custom User Authorizers +sidebar_label: Custom Authorizers +--- + +:::info +This is an advanced configuration topic. In most cases, the default NiFi authorizer configuration is sufficient. +::: + +According to the NiFi Admin Guide, an [Authorizer](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration) grants users the privileges to manage users and policies by creating preliminary authorizations at startup. By default, the `StandardManagedAuthorizer` leverages a `FileUserGroupProvider` and a `FileAccessPolicyProvider` which are file-based rules for each user you allow to interact with your NiFi cluster. + +In many cases, the default authorizer configuration is enough to control access to a NiFi cluster. However, there may be advanced cases where the default [`managed-authorizer`](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#standardmanagedauthorizer) isn't sufficient to make every authorization decision you need. In this case, you can provide a custom authorizer extension and use that instead. 
+ +Suppose a custom Authorizer is written and deployed with NiFi that reads the rules from a remote database rather than a local file. We'll call this `DatabaseAuthorizer`. Also suppose it is composed of a `DatabaseUserGroupProvider` and a `DatabaseAccessPolicyProvider`. In order to leverage these, they must end up on NiFi's classpath. + +In order to use this authorizer, you need to update NiFi's `authorizers.xml` configuration. This can be done through NiFiKOp by setting either the `Spec.readOnlyConfig.authorizerConfig.replaceTemplateConfigMap` or `Spec.readOnlyConfig.authorizerConfig.replaceTemplateSecretConfig`. The NiFiKOp deployment is dynamic in that node identities are determined at deploy time, so the authorizer configuration is templated to account for this. This means that the replacement ConfigMap or Secret must also be templated. + +Following the example, the below would be a sufficient authorizer template replacement: + +```yaml +{{- $nodeList := .NodeList }} +{{- $clusterName := .ClusterName }} +{{- $namespace := .Namespace }} + + + file-user-group-provider + org.apache.nifi.authorization.FileUserGroupProvider + ../data/users.xml + + {{ .ControllerUser }} +{{- range $i, $host := .NodeList }} + {{ $host }} +{{- end }} + + + database-user-group-provider + my.custom.DatabaseUserGroupProvider + +{{- range $i, $host := .NodeList }} + {{ $host }} +{{- end }} + + + file-access-policy-provider + org.apache.nifi.authorization.FileAccessPolicyProvider + file-user-group-provider + ../data/authorizations.xml + {{ .ControllerUser }} + +{{- range $i, $host := .NodeList }} + {{ $host }} +{{- end }} + + + + database-access-policy-provider + my.custom.DatabaseAccessPolicyProvider + +{{- range $i, $host := .NodeList }} + {{ $host }} +{{- end }} + + + + managed-authorizer + org.apache.nifi.authorization.StandardManagedAuthorizer + file-access-policy-provider + + + custom-database-authorizer + my.custom.DatabaseAuthorizer + database-access-policy-provider + + +``` + +And 
finally, the NiFi property `nifi.security.user.authorizer` indicates which of the configured authorizers in the authorizers.xml file to use. Following the example, we'd set the property to: + +```sh +nifi.security.user.authorizer=custom-database-authorizer +``` + diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/3_nifi_dataflow.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/3_nifi_dataflow.md new file mode 100644 index 0000000000..e944fd01aa --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/3_nifi_dataflow.md @@ -0,0 +1,126 @@ +--- +id: 3_nifi_dataflow +title: Provisioning NiFi Dataflows +sidebar_label: NiFi Dataflows +--- + +You can create NiFi dataflows either : + +* directly against the cluster through its REST API (using UI or some home made scripts), or +* via the `NifiDataflow` CRD. + +If you want more details about the design, just have a look on the [design page](../1_concepts/2_design_principes.md#dataflow-lifecycle-management) + +To deploy a [NifiDataflow] you have to start by deploying a [NifiRegistryClient] because **NiFiKop** manages dataflow using the [NiFi Registry feature](https://nifi.apache.org/registry). + +Below is an example of [NifiRegistryClient] : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiRegistryClient +metadata: + name: registry-client-example + namespace: nifikop +spec: + clusterRef: + name: nc + namespace: nifikop + description: "Registry client managed by NiFiKop" + uri: "http://nifi.hostname.com:18080" +``` + +Once you have deployed your [NifiRegistryClient], you have the possibility of defining a configuration that you will apply to your [NifiDataflow]. + +This configuration is defined using the [NifiParameterContext] CRD, which NiFiKop will convert into a [Parameter context](https://nifi.apache.org/docs/nifi-docs/html/user-guide.html#parameter-contexts). 
+ + +Below is an example of [NifiParameterContext] : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiParameterContext +metadata: + name: dataflow-lifecycle + namespace: demo +spec: + description: "It is a test" + clusterRef: + name: nc + namespace: nifikop + secretRefs: + - name: secret-params + namespace: nifikop + parameters: + - name: test + value: toto + description: tutu + - name: test2 + value: toto + description: toto +``` + +As you can see, in the [NifiParameterContext] you can refer to some secrets that will be converted into [sensitive parameter](https://nifi.apache.org/docs/nifi-docs/html/user-guide.html#using-parameters-with-sensitive-properties). + +Here is an example of secret that you can create that will be used by the configuration above : + +```console +kubectl create secret generic secret-params \ + --from-literal=secret1=yop \ + --from-literal=secret2=yep \ + -n nifikop +``` + +:::warning +As a sensitive value cannot be retrieved through the Rest API, to update the value of a sensitive parameter, you have to : + +- remove it from the secret +- wait for the next loop +- insert the parameter with the new value inside the secret + +or you can simply create a new [NifiParameterContext] and refer it into your [NifiDataflow]. 
+::: + +You can now deploy your [NifiDataflow] by referencing the previous objects : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiDataflow +metadata: + name: dataflow-lifecycle +spec: + parentProcessGroupID: "16cfd2ec-0174-1000-0000-00004b9b35cc" + bucketId: "01ced6cc-0378-4893-9403-f6c70d080d4f" + flowId: "9b2fb465-fb45-49e7-94fe-45b16b642ac9" + flowVersion: 2 + syncMode: always + skipInvalidControllerService: true + skipInvalidComponent: true + clusterRef: + name: nc + namespace: nifikop + registryClientRef: + name: registry-client-example + namespace: nifikop + parameterContextRef: + name: dataflow-lifecycle + namespace: demo + updateStrategy: drain +``` + +To find details about the versioned flow information required check the [official documentation](https://nifi.apache.org/docs/nifi-registry-docs/index.html) + +You have two modes of control from your dataflow by the operator : + +1 - `Spec.SyncMode == never` : The operator will deploy the dataflow as described in the resource, and never control it (unless you change the field to `always`). It is useful when you want to deploy your dataflow without starting it. + +2 - `Spec.SyncMode == once` : The operator will deploy the dataflow as described in the resource, run it once, and never control it again (unless you change the field to `always`). It is useful when you want to deploy your dataflow in a dev environment, and you want to update the dataflow. + +3 - `Spec.SyncMode == always` : The operator will deploy and ensure the dataflow lifecycle, it will avoid all manual modification directly from the Cluster (e.g remove the process group, remove the versioning, update the parent process group, make some local changes ...). If you want to perform update, rollback or stuff like this, you have to simply update the [NifiDataflow] resource. 
+ +:::important +More information about `Spec.UpdateStrategy` [here](../5_references/5_nifi_dataflow.md#dataflowupdatestrategy) +::: + +[NifiDataflow]: ../5_references/5_nifi_dataflow.md +[NifiRegistryClient]: ../5_references/3_nifi_registry_client.md +[NifiParameterContext]: ../5_references/4_nifi_parameter_context.md \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/3_tasks/4_nifi_user_group.md b/site/website/versioned_docs/version-v0.13.0/3_tasks/4_nifi_user_group.md new file mode 100644 index 0000000000..f89c2102a2 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/3_tasks/4_nifi_user_group.md @@ -0,0 +1,168 @@ +--- +id: 4_nifi_user_group +title: Provisioning NiFi Users and Groups +sidebar_label: NiFi Users and Groups +--- + +## User management + +The `NifiUser` resource was already introduced for the [SSL credentials](./2_security/1_ssl.md#create-ssl-credentials) concerns. +What we are covering here is the NiFi user management part introduced in this resource. + +When you create a `NifiUser` resource the operator will : + +1. Try to check if a user already exists with the same name on the NiFi cluster, if it does, the operator will set [NifiUser.Status.Id](./2_security/1_ssl.md#create-ssl-credentials) to bind it with the kubernetes resource. +2. If no user is found, the operator will create and manage it (i.e it will ensure the synchronisation with the NiFi Cluster). + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiUser +metadata: + name: aguitton +spec: + # identity field is used to define the user identity on NiFi cluster side, + # it is useful when the user's name doesn't suit the Kubernetes resource name. + identity: alexandre.guitton@orange.com + # Contains the reference to the NifiCluster with which the user is linked. 
+ clusterRef: + name: nc + namespace: nifikop + # Whether or not the the operator also include a Java keystore format (JKS) with you secret + includeJKS: false + # Whether or not a certificate will be created for this user. + createCert: false + # defines the list of access policies that will be granted to the group. + accessPolicies: + # defines the kind of access policy, could be "global" or "component". + - type: component + # defines the kind of action that will be granted, could be "read" or "write" + action: read + # resource defines the kind of resource targeted by this access policies, please refer to the following page : + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#access-policies + resource: /data + # componentType is used if the type is "component", it's allow to define the kind of component on which is the + # access policy + componentType: "process-groups" + # componentId is used if the type is "component", it's allow to define the id of the component on which is the + # access policy + componentId: "" +``` + +By default the user name that will be used is the name of the resource. + +But as there are some constraints on this name (e.g [RFC 1123](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names)) that doesn't match with those applied on NiFi, you can override it with the `NifiUser.Spec.Identity` field which is more permissive. +In the example above the kubernetes resource name will be `aguitton` but the NiFi use created on the cluster will be `alexandre.guitton@orange.com`. + +In the case the user will not authenticate himself using TLS authentication, the operator doesn't have to create a certificate, so just set `NifiUser.Spec.CreateCert` to false. + +For each user, you have the ability to define a list of [AccessPolicies](../5_references/2_nifi_user.md#accesspolicy) to give a list of access to your user. 
+In the example above we are giving to user `alexandre.guitton@orange.com` the right to view metadata et content for the root process group in flowfile queues in outbound connections and through provenance events. + +## UserGroup management + +To simplify the access management Apache NiFi allows to define groups containing a list of users, on which we apply a list of access policies. +This part is supported by the operator using the `NifiUserGroup` resource : + + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiUserGroup +metadata: + name: group-test +spec: + # Contains the reference to the NifiCluster with the one the registry client is linked. + clusterRef: + name: nc + namespace: nifikop + # contains the list of reference to NifiUsers that are part to the group. + usersRef: + - name: nc-0-node.nc-headless.nifikop.svc.cluster.local +# namespace: nifikop + - name: nc-controller.nifikop.mgt.cluster.local + # defines the list of access policies that will be granted to the group. + accessPolicies: + # defines the kind of access policy, could be "global" or "component". + - type: global + # defines the kind of action that will be granted, could be "read" or "write" + action: read + # resource defines the kind of resource targeted by this access policies, please refer to the following page : + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#access-policies + resource: /counters +# # componentType is used if the type is "component", it's allow to define the kind of component on which is the +# # access policy +# componentType: "process-groups" +# # componentId is used if the type is "component", it's allow to define the id of the component on which is the +# # access policy +# componentId: "" +``` + +When you create a `NifiUserGroup` resource, the operator will create and manage a group named `${resource namespace}-${resource name}` in Nifi. 
+To declare the users that are part of this group, you just have to declare them in the [NifiUserGroup.UsersRef](../5_references/6_nifi_usergroup.md#userreference) field. + +:::important +The [NifiUserGroup.UsersRef](../5_references/6_nifi_usergroup.md#userreference) requires to declare the name and namespace of a `NifiUser` resource, so it is previously required to declare the resource. + +It's required to create the resource even if the user is already declared in NiFi Cluster (In that case the operator will just sync the kubernetes resource). +::: + +Like for `NifiUser` you can declare a list of [AccessPolicies](../5_references/2_nifi_user.md#accesspolicy) to give a list of access to your user. + +In the example above we are giving to users `nc-0-node.nc-headless.nifikop.svc.cluster.local` and `nc-controller.nifikop.mgt.cluster.local` the right to view the counters informations. + +## Managed groups for simple setup + +In some case these two features could be heavy to define, for example when you have 10 dataflows with one cluster for each of them, it will lead in a lot of `.yaml` files ... +To simplify this, we implement in the operator 2 `managed groups` : + +- **Admins :** a group giving access to everything on the NiFi Cluster, +- **Readers :** a group giving access as viewer on the NiFi Cluster. + +You can directly define the list of users who belong to each of them in the `NifiCluster.Spec` field : + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +metadata: + name: mynifi +spec: + ... + oneNifiNodePerNode: false + # + propagateLabels: true + managedAdminUsers: + - identity : "alexandre.guitton@orange.com" + name: "aguitton" + - identity : "nifiuser@orange.com" + name: "nifiuser" + managedReaderUsers: + - identity : "toto@orange.com" + name: "toto" + ... 
+``` + +In this example the operator will create and manage 3 `NifiUsers` : + +- **aguitton**, with the identity : `alexandre.guitton@orange.com` +- **nifiuser**, with the identity : `nifiuser@orange.com` +- **toto**, with the identity : `toto@orange.com` + +And create and manage two groups : + +- **managed-admins :** that will contain 3 users (**aguitton**, **nifiuser**, **nc-controller.nifikop.mgt.cluster.local** which is the controller user). +- **managed-readers :** that will contain 1 user (**toto**) + +And the rest of the stuff will be reconciled and managed as described for `NifiUsers` and `NifiUserGroups`. + +:::note +There is one more group that is created and managed by the operator, this is the **managed-nodes** group, for each node a `NifiUser` is created, and we automatically add them to this group to give them the right list of accesses. + +To get the list of managed groups just check the list of `NifiUserGroup` : + +```console +kubectl get -n nifikop nifiusergroups.nifi.konpyutaika.com +NAME AGE +managed-admins 6d7h +managed-nodes 6d7h +managed-readers 6d7h +``` +::: \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/4_examples/1_simple_nifi_cluster.md b/site/website/versioned_docs/version-v0.13.0/4_examples/1_simple_nifi_cluster.md new file mode 100644 index 0000000000..224511cdbe --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/4_examples/1_simple_nifi_cluster.md @@ -0,0 +1,5 @@ +--- +id: 1_simple_nifi_cluster +title: Simple NiFi cluster +sidebar_label: Simple NiFi cluster +--- \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/1_nifi_cluster.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/1_nifi_cluster.md new file mode 100644 index 0000000000..4977ceeff6 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/1_nifi_cluster.md @@ -0,0 +1,202 @@ +--- +id: 1_nifi_cluster 
+title: NiFi cluster +sidebar_label: NiFi cluster +--- + +`NifiCluster` describes the desired state of the NiFi cluster we want to setup through the operator. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiCluster +metadata: + name: simplenifi +spec: + service: + headlessEnabled: true + annotations: + tyty: ytyt + labels: + tete: titi + pod: + annotations: + toto: tata + labels: + titi: tutu + zkAddress: 'zookeepercluster-client.zookeeper:2181' + zkPath: '/simplenifi' + clusterImage: 'apache/nifi:1.11.3' + oneNifiNodePerNode: false + nodeConfigGroups: + default_group: + isNode: true + podMetadata: + annotations: + node-annotation: "node-annotation-value" + labels: + node-label: "node-label-value" + externalVolumeConfigs: + - name: example-volume + mountPath: "/opt/nifi/example" + secret: + secretName: "raw-controller" + storageConfigs: + - mountPath: '/opt/nifi/nifi-current/logs' + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: 'standard' + resources: + requests: + storage: 10Gi + serviceAccountName: 'default' + resourcesRequirements: + limits: + cpu: '2' + memory: 3Gi + requests: + cpu: '1' + memory: 1Gi + nodes: + - id: 1 + nodeConfigGroup: 'default_group' + - id: 2 + nodeConfigGroup: 'default_group' + propagateLabels: true + nifiClusterTaskSpec: + retryDurationMinutes: 10 + listenersConfig: + internalListeners: + - type: 'http' + name: 'http' + containerPort: 8080 + - type: 'cluster' + name: 'cluster' + containerPort: 6007 + - type: 's2s' + name: 's2s' + containerPort: 10000 + externalServices: + - name: 'clusterip' + spec: + type: ClusterIP + portConfigs: + - port: 8080 + internalListenerName: 'http' + metadata: + annotations: + toto: tata + labels: + titi: tutu +``` + +## NifiCluster + +| Field | Type | Description | Required | Default | +| -------- | ----------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------- | -------- | ------- | +| metadata | [ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta) | is metadata that all persisted resources must have, which includes all objects users must create. | No | nil | +| spec | [NifiClusterSpec](#nificlusterspec) | defines the desired state of NifiCluster. | No | nil | +| status | [NifiClusterStatus](#nificlusterstatus) | defines the observed state of NifiCluster. | No | nil | + +## NifiClusterSpec + +| Field | Type | Description | Required | Default | +| ------------------ |----------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ---------------- |--------------------------| +| clientType | Enum={"tls","basic"} | defines if the operator will use basic or tls authentication to query the NiFi cluster. | No | `tls` | +| type | Enum={"external","internal"} | defines if the cluster is internal (i.e manager by the operator) or external. | No | `internal` | +| nodeURITemplate | string | used to dynamically compute node uri. | if external type | - | +| nifiURI | stringused access through a LB uri. | if external type | - | +| rootProcessGroupId | string | contains the uuid of the root process group for this cluster. | if external type | - | +| secretRef | \[ \][SecretReference](../4_nifi_parameter_context#secretreference) | reference the secret containing the informations required to authentiticate to the cluster. | if external type | - | +| proxyUrl | string | defines the proxy required to query the NiFi cluster. 
| if external type | - | +|service| [ServicePolicy](#servicepolicy) | defines the policy for services owned by NiFiKop operator. |No| - | +|pod| [PodPolicy](#podpolicy) | defines the policy for pod owned by NiFiKop operator. |No| - | +|zkAddress| string | specifies the ZooKeeper connection string in the form hostname:port where host and port are those of a Zookeeper server. |No| "" | +|zkPath| string | specifies the Zookeeper chroot path as part of its Zookeeper connection string which puts its data under same path in the global ZooKeeper namespace. |Yes| "/" | +|initContainerImage| string | can override the default image used into the init container to check if ZoooKeeper server is reachable.. |Yes| "busybox" | +|initContainers| \[ \]string | defines additional initContainers configurations. |No| \[ \] | +|clusterImage| string | can specify the whole nificluster image in one place. |No| "" | +|oneNifiNodePerNode| boolean | if set to true every nifi node is started on a new node, if there is not enough node to do that it will stay in pending state. If set to false the operator also tries to schedule the nifi node to a unique node but if the node number is insufficient the nifi node will be scheduled to a node where a nifi node is already running. |No| nil | +|propagateLabels| boolean | - |Yes| false | +|managedAdminUsers| \[ \][ManagedUser](#managedusers) | contains the list of users that will be added to the managed admin group (with all rights). |No| [] | +|managedReaderUsers| \[ \][ManagedUser](#managedusers) | contains the list of users that will be added to the managed admin group (with all rights). |No| [] | +|readOnlyConfig| [ReadOnlyConfig](./2_read_only_config.md) | specifies the read-only type Nifi config cluster wide, all theses will be merged with node specified readOnly configurations, so it can be overwritten per node. |No| nil | +|nodeUserIdentityTemplate| string | specifies the template to be used when naming the node user identity (e.g. 
node-%d-mysuffix) |Yes| "node-%d-\" | +|nodeConfigGroups| map\[string\][NodeConfig](./3_node_config.md) | specifies multiple node configs with unique name |No| nil | +|nodes| \[ \][Node](./3_node_config.md) | specifies the list of cluster nodes, all node requires an image, unique id, and storageConfigs settings |Yes| nil +|disruptionBudget| [DisruptionBudget](#disruptionbudget) | defines the configuration for PodDisruptionBudget. |No| nil | +|ldapConfiguration| [LdapConfiguration](#ldapconfiguration) | specifies the configuration if you want to use LDAP. |No| nil | +|nifiClusterTaskSpec| [NifiClusterTaskSpec](#nificlustertaskspec) | specifies the configuration of the nifi cluster Tasks. |No| nil | +|listenersConfig| [ListenersConfig](./6_listeners_config.md) | specifies nifi's listener specifig configs. |No| - | +|sidecarConfigs| \[ \][Container](https://godoc.org/k8s.io/api/core/v1#Container) | Defines additional sidecar configurations. [Check documentation for more informations] | +|externalServices| \[ \][ExternalServiceConfigs](./7_external_service_config.md) | specifies settings required to access nifi externally. |No| - | +|topologySpreadConstraints| \[ \][TopologySpreadConstraint](https://godoc.org/k8s.io/api/core/v1#TopologySpreadConstraint) | specifies any TopologySpreadConstraint objects to be applied to all nodes. |No| nil | +|nifiControllerTemplate| string | NifiControllerTemplate specifies the template to be used when naming the node controller (e.g. 
%s-mysuffix) **Warning: once defined don't change this value either the operator will no longer be able to manage the cluster** |Yes| "%s-controller" | +|controllerUserIdentity| string | ControllerUserIdentity specifies what to call the static admin user's identity **Warning: once defined don't change this value either the operator will no longer be able to manage the cluster** |Yes| false | + + +## NifiClusterStatus + +| Field | Type | Description | Required | Default | +| ------------------ | ------------------------------------------- | ------------------------------------------------------------- | -------- | ------- | +| nodesState | map\[string\][NodeState](./5_node_state.md) | Store the state of each nifi node. | No | - | +| State | [ClusterState](#clusterstate) | Store the state of each nifi node. | Yes | - | +| rootProcessGroupId | string | contains the uuid of the root process group for this cluster. | No | - | + +## ServicePolicy + +| Field | Type | Description | Required | Default | +| --------------- |---------------------| --------------------------------------------------------------------------------------------------------------------------------------------------- |----------|-----------------------------------------------------------| +| headlessEnabled | boolean | specifies if the cluster should use headlessService for Nifi or individual services using service per nodes may come an handy case of service mesh. | Yes | false | +| serviceTemplate | string | specifies the template to be used when naming the service. | Yes | If headlessEnabled = true ? 
"%s-headless" = "%s-all-node" | +| annotations | map\[string\]string | Annotations specifies the annotations to attach to services the NiFiKop operator creates | No | - | +| labels | map\[string\]string | Labels specifies the labels to attach to services the NiFiKop operator creates | No | - | + + +## PodPolicy + +| Field | Type | Description | Required | Default | +| ----------- | ------------------- | ------------------------------------------------------------------------------------ | -------- | ------- | +| annotations | map\[string\]string | Annotations specifies the annotations to attach to pods the NiFiKop operator creates | No | - | +| labels | map\[string\]string | Labels specifies the Labels to attach to pods the NiFiKop operator creates | No | - | +| hostAliases | \[\][HostAlias](https://pkg.go.dev/k8s.io/api/core/v1#HostAlias) | A list of host aliases to include in every pod's /etc/hosts configuration in the scenario where DNS is not available. | No | \[\] | + +## ManagedUsers + +| Field | Type | Description | Required | Default | +| ------ | ------ | ----------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| identity | string | identity field is use to define the user identity on NiFi cluster side, it use full when the user's name doesn't suite with Kubernetes resource name. | No | - | +| name | string | name field is use to name the NifiUser resource, if not identity is provided it will be used to name the user on NiFi cluster side. | Yes | - | + +## DisruptionBudget + +| Field | Type | Description | Required | Default | +| -------- | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| create | bool | if set to true, will create a podDisruptionBudget. 
| No | - | +| budget | string | the budget to set for the PDB, can either be static number or a percentage. | Yes | - | + +## LdapConfiguration + +| Field | Type | Description | Required | Default | +| ------------ | ------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -------- | ------- | +| enabled | boolean | if set to true, we will enable ldap usage into nifi.properties configuration. | No | false | +| url | string | space-separated list of URLs of the LDAP servers (i.e. ldap://${hostname}:${port}). | No | "" | +| searchBase | string | base DN for searching for users (i.e. CN=Users,DC=example,DC=com). | No | "" | +| searchFilter | string | Filter for searching for users against the 'User Search Base'. (i.e. sAMAccountName={0}). The user specified name is inserted into '{0}'. | No | "" | + +## NifiClusterTaskSpec + +| Field | Type | Description | Required | Default | +| -------------------- | ---- | ------------------------------------------------------------- | -------- | ------- | +| retryDurationMinutes | int | describes the amount of time the Operator waits for the task. 
| Yes | 5 | + +## ClusterState + +| Name | Value | Description | +| --------------------------- | ----------------------- | ------------------------------------------------------ | +| NifiClusterInitializing | ClusterInitializing | states that the cluster is still in initializing stage | +| NifiClusterInitialized | ClusterInitialized | states that the cluster is initialized | +| NifiClusterReconciling | ClusterReconciling | states that the cluster is still in reconciling stage | +| NifiClusterRollingUpgrading | ClusterRollingUpgrading | states that the cluster is rolling upgrading | +| NifiClusterRunning | ClusterRunning | states that the cluster is in running state | diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/2_read_only_config.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/2_read_only_config.md new file mode 100644 index 0000000000..883d420355 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/2_read_only_config.md @@ -0,0 +1,222 @@ +--- +id: 2_read_only_config +title: Read only configurations +sidebar_label: Read only configurations +--- + +ReadOnlyConfig object specifies the read-only type Nifi config cluster wide, all theses will be merged with node specified readOnly configurations, so it can be overwritten per node. + +```yaml +readOnlyConfig: + # MaximumTimerDrivenThreadCount define the maximum number of threads for timer driven processors available to the system. + maximumTimerDrivenThreadCount: 30 + # MaximumEventDrivenThreadCount define the maximum number of threads for event driven processors available to the system. + maximumEventDrivenThreadCount: 10 + # Logback configuration that will be applied to the node + logbackConfig: + # logback.xml configuration that will replace the one produced based on template + replaceConfigMap: + # The key of the value,in data content, that we want use. 
+ data: logback.xml + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # logback.xml configuration that will replace the one produced based on template and overrideConfigMap + replaceSecretConfig: + # The key of the value,in data content, that we want use. + data: logback.xml + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # Authorizer configuration that will be applied to the node + authorizerConfig: + # An authorizers.xml configuration template that will replace the default template seen in authorizers.go + replaceTemplateConfigMap: + # The key of the value, in data content, that we want use. + data: authorizers.xml + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # An authorizers.xml configuration template that will replace the default template seen in authorizers.go and the replaceTemplateConfigMap + replaceTemplateSecretConfig: + # The key of the value,in data content, that we want use. + data: authorizers.xml + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # NifiProperties configuration that will be applied to the node. + nifiProperties: + # Additionnals nifi.properties configuration that will override the one produced based on template and + # configuration + overrideConfigMap: + # The key of the value,in data content, that we want use. + data: nifi.properties + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop. + # Additionnals nifi.properties configuration that will override the one produced based + # on template, configurations, overrideConfigMap and overrideConfigs. 
+ overrideSecretConfig: + # The key of the value,in data content, that we want use. + data: nifi.properties + # Name of the configmap that we want to refer. + name: raw + # Namespace where is located the secret that we want to refer. + namespace: nifikop + # Additionnals nifi.properties configuration that will override the one produced based + # on template, configurations and overrideConfigMap + overrideConfigs: | + nifi.ui.banner.text=NiFiKop + # A comma separated list of allowed HTTP Host header values to consider when NiFi + # is running securely and will be receiving requests to a different host[:port] than it is bound to. + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties + # webProxyHosts: + # Nifi security client auth + needClientAuth: false + # Indicates which of the configured authorizers in the authorizers.xml file to use + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration + # authorizer: + # ZookeeperProperties configuration that will be applied to the node. + zookeeperProperties: + # # Additionnals zookeeeper.properties configuration that will override the one produced based on template and + # # configuration + # overrideConfigMap: + # # The key of the value,in data content, that we want use. + # data: zookeeeper.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop. + # # Additionnals zookeeeper.properties configuration that will override the one produced based + # # on template, configurations, overrideConfigMap and overrideConfigs. + # overrideSecretConfig: + # # The key of the value,in data content, that we want use. + # data: zookeeeper.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. 
+ # namespace: nifikop + # Additionnals zookeeper.properties configuration that will override the one produced based + # on template and configurations. + overrideConfigs: | + initLimit=15 + autopurge.purgeInterval=24 + syncLimit=5 + tickTime=2000 + dataDir=./state/zookeeper + autopurge.snapRetainCount=30 + # BootstrapProperties configuration that will be applied to the node. + bootstrapProperties: + # # Additionnals bootstrap.properties configuration that will override the one produced based on template and + # # configuration + # overrideConfigMap: + # # The key of the value,in data content, that we want use. + # data: bootstrap.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop. + # # Additionnals bootstrap.properties configuration that will override the one produced based + # # on template, configurations, overrideConfigMap and overrideConfigs. + # overrideSecretConfig: + # # The key of the value,in data content, that we want use. + # data: bootstrap.properties + # # Name of the configmap that we want to refer. + # name: raw + # # Namespace where is located the secret that we want to refer. + # namespace: nifikop + # JVM memory settings + nifiJvmMemory: "512m" + # Additionnals bootstrap.properties configuration that will override the one produced based + # on template and configurations. 
+ # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#bootstrap_properties + overrideConfigs: | + # java.arg.4=-Djava.net.preferIPv4Stack=true +``` + +## ReadOnlyConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|maximumTimerDrivenThreadCount|int32|define the maximum number of threads for timer driven processors available to the system.|No|10| +|maximumEventDrivenThreadCount|int32|define the maximum number of threads for event driven processors available to the system.|No|1| +|additionalSharedEnvs|\[ \][corev1.EnvVar](https://pkg.go.dev/k8s.io/api/core/v1#EnvVar)|define a set of additional env variables that will shared between all init containers and ontainers in the pod..|No|\[ \]| +|nifiProperties|[NifiProperties](#nifiproperties)|nifi.properties configuration that will be applied to the node.|No|nil| +|zookeeperProperties|[ZookeeperProperties](#zookeeperproperties)|zookeeper.properties configuration that will be applied to the node.|No|nil| +|bootstrapProperties|[BootstrapProperties](#bootstrapproperties)|bootstrap.conf configuration that will be applied to the node.|No|nil| +|logbackConfig|[LogbackConfig](#logbackconfig)|logback.xml configuration that will be applied to the node.|No|nil| +|authorizerConfig|[AuthorizerConfig](#authorizerconfig)|authorizers.xml configuration template that will be applied to the node.|No|nil| +|bootstrapNotificationServicesConfig|[BootstrapNotificationServices](#bootstrapnotificationservices)|bootstrap_notification_services.xml configuration that will be applied to the node.|No|nil| + + + +## NifiProperties + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|overrideConfigMap|[ConfigmapReference](#configmapreference)|Additionnals nifi.properties configuration that will override the one produced based on template and configuration.|No|nil| +|overrideConfigs|string|Additionnals nifi.properties configuration that will override 
the one produced based on template, configurations and overrideConfigMap.|No|""| +|overrideSecretConfig|[SecretConfigReference](#secretconfigreference)|Additionnals nifi.properties configuration that will override the one produced based on template, configurations, overrideConfigMap and overrideConfigs.|No|nil| +|webProxyHosts|\[ \]string| A list of allowed HTTP Host header values to consider when NiFi is running securely and will be receiving requests to a different host[:port] than it is bound to. [web-properties](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#web-properties)|No|""| +|needClientAuth|boolean|Nifi security client auth.|No|false| +|authorizer|string|Indicates which of the configured authorizers in the authorizers.xml file to use [authorizer-configuration](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#authorizer-configuration)|No|"managed-authorizer"| + + +## ZookeeperProperties + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|overrideConfigMap|[ConfigmapReference](#configmapreference)|Additionnals zookeeper.properties configuration that will override the one produced based on template and configuration.|No|nil| +|overrideConfigs|string|Additionnals zookeeper.properties configuration that will override the one produced based on template, configurations and overrideConfigMap.|No|""| +|overrideSecretConfig|[SecretConfigReference](#secretconfigreference)|Additionnals zookeeper.properties configuration that will override the one produced based on template, configurations, overrideConfigMap and overrideConfigs.|No|nil| + +## BootstrapProperties + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|overrideConfigMap|[ConfigmapReference](#configmapreference)|Additionnals bootstrap.properties configuration that will override the one produced based on template and configuration.|No|nil| +|overrideConfigs|string|Additionnals 
bootstrap.properties configuration that will override the one produced based on template, configurations and overrideConfigMap.|No|""| +|overrideSecretConfig|[SecretConfigReference](#secretconfigreference)|Additionnals bootstrap.properties configuration that will override the one produced based on template, configurations, overrideConfigMap and overrideConfigs.|No|nil| +|NifiJvmMemory|string|JVM memory settings.|No|"512m"| + +## LogbackConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|replaceConfigMap|[ConfigmapReference](#configmapreference)|logback.xml configuration that will replace the one produced based on template.|No|nil| +|replaceSecretConfig|[SecretConfigReference](#secretconfigreference)|logback.xml configuration that will replace the one produced based on template and overrideConfigMap.|No|nil| + +## AuthorizerConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|replaceTemplateConfigMap|[ConfigmapReference](#configmapreference)|authorizers.xml configuration template that will replace the default template.|No|nil| +|replaceTemplateSecretConfig|[SecretConfigReference](#secretconfigreference)|authorizers.xml configuration that will replace the default template and the replaceTemplateConfigMap.|No|nil| + +## BootstrapNotificationServicesConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|replaceConfigMap|[ConfigmapReference](#configmapreference)|bootstrap_notifications_services.xml configuration that will replace the one produced based on template.|No|nil| +|replaceSecretConfig|[SecretConfigReference](#secretconfigreference)|bootstrap_notifications_services.xml configuration that will replace the one produced based on template and overrideConfigMap.|No|nil| + +## ConfigmapReference + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string|Name of the configmap that we want to 
refer.|Yes|""| +|namespace|string|Namespace where is located the configmap that we want to refer.|No|""| +|data|string|The key of the value,in data content, that we want use.|Yes|""| + +## SecretConfigReference + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string|Name of the secret that we want to refer.|Yes|""| +|namespace|string|Namespace where is located the secret that we want to refer.|No|""| +|data|string|The key of the value,in data content, that we want use.|Yes|""| \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/3_node_config.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/3_node_config.md new file mode 100644 index 0000000000..9675b31f0d --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/3_node_config.md @@ -0,0 +1,108 @@ +--- +id: 3_node_config +title: Node configuration +sidebar_label: Node configuration +--- + +NodeConfig defines the node configuration + +```yaml + default_group: + # provenanceStorage allow to specify the maximum amount of data provenance information to store at a time + # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#write-ahead-provenance-repository-properties + provenanceStorage: "10 GB" + #RunAsUser define the id of the user to run in the Nifi image + # +kubebuilder:validation:Minimum=1 + runAsUser: 1000 + # Set this to true if the instance is a node in a cluster. 
+ # https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#basic-cluster-setup + isNode: true + # Additionnal metadata to merge to the pod associated + podMetadata: + annotations: + node-annotation: "node-annotation-value" + labels: + node-label: "node-label-value" + # Docker image used by the operator to create the node associated + # https://hub.docker.com/r/apache/nifi/ +# image: "apache/nifi:1.11.2" + # nodeAffinity can be specified, operator populates this value if new pvc added later to node + # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity +# nodeAffinity: + # imagePullPolicy define the pull policy for NiFi cluster docker image + imagePullPolicy: IfNotPresent + # priorityClassName define the name of the priority class to be applied to these nodes + priorityClassName: "example-priority-class-name" + # externalVolumeConfigs specifies a list of volume to mount into the main container. + externalVolumeConfigs: + - name: example-volume + mountPath: "/opt/nifi/example" + secret: + secretName: "raw-controller" + # storageConfigs specifies the node related configs + storageConfigs: + # Name of the storage config, used to name PV to reuse into sidecars for example. + - name: provenance-repository + # Path where the volume will be mount into the main nifi container inside the pod. 
+ mountPath: "/opt/nifi/provenance_repository" + # Kubernetes PVC spec + # https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: "standard" + resources: + requests: + storage: 10Gi + - mountPath: "/opt/nifi/nifi-current/logs" + name: logs + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: "standard" + resources: + requests: + storage: 10Gi +``` + +## NodeConfig + +| Field | Type |Description|Required|Default| +|-----------------------|----------------------------------------------------------------------------------------------|-----------|--------|--------| +| provenanceStorage | string |provenanceStorage allow to specify the maximum amount of data provenance information to store at a time: [write-ahead-provenance-repository-properties](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#write-ahead-provenance-repository-properties)|No|"8 GB"| +| runAsUser | int64 |define the id of the user to run in the Nifi image|No|1000| +| fsGroup | int64 |define the id of the group for each volumes in Nifi image|No|1000| +| isNode | boolean |Set this to true if the instance is a node in a cluster: [basic-cluster-setup](https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#basic-cluster-setup)|No|true| +| image | string | Docker image used by the operator to create the node associated. 
[Nifi docker registry](https://hub.docker.com/r/apache/nifi/)|No|""| +| imagePullPolicy | [PullPolicy](https://godoc.org/k8s.io/api/core/v1#PullPolicy) | define the pull policy for NiFi cluster docker image.)|No|""| +| nodeAffinity | string | operator populates this value if new pvc added later to node [node-affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity)|No|nil| +| storageConfigs | \[ \][StorageConfig](#storageconfig) |specifies the node related configs.|No|nil| +| externalVolumeConfigs | \[ \][ExternalVolumeConfig](#externalvolumeconfig) |specifies a list of volume to mount into the main container.|No|nil| +| serviceAccountName | string |specifies the serviceAccount used for this specific node.|No|"default"| +| resourcesRequirements | [ResourceRequirements](https://godoc.org/k8s.io/api/core/v1#ResourceRequirements) | works exactly like Container resources, the user can specify the limit and the requests through this property [manage-compute-resources-container](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/).|No|nil| +| imagePullSecrets | \[ \][LocalObjectReference](https://godoc.org/k8s.io/api/core/v1#TypedLocalObjectReference) |specifies the secret to use when using private registry.|No|nil| +| nodeSelector | map\[string\]string |nodeSelector can be specified, which set the pod to fit on a node [nodeselector](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector)|No|nil| +| tolerations | \[ \][Toleration](https://godoc.org/k8s.io/api/core/v1#Toleration) |tolerations can be specified, which set the pod's tolerations [taint-and-toleration](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/#concepts).|No|nil| +| podMetadata | [Metadata](#metadata) |define additionnal metadata to merge to the pod associated.|No|nil| +| hostAliases | \[\][HostAlias](https://pkg.go.dev/k8s.io/api/core/v1#HostAlias) | A list of host aliases to include in 
each pod's /etc/hosts configuration in the scenario where DNS is not available. | No | \[\] | +| priorityClassName | string | Specify the name of the priority class to apply to pods created with this node config | No | nil| + +## StorageConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string|Name of the storage config, used to name PV to reuse into sidecars for example.|Yes| - | +|mountPath|string|Path where the volume will be mount into the main nifi container inside the pod.|Yes| - | +|pvcSpec|[PersistentVolumeClaimSpec](https://godoc.org/k8s.io/api/core/v1#PersistentVolumeClaimSpec)|Kubernetes PVC spec. [create-a-persistentvolumeclaim](https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim).|Yes| - | + +## ExternalVolumeConfig + +| Field |Type| Description |Required|Default| +|-------------------------------------------------------------------|----|-------------|--------|--------| +|| [VolueMount](https://pkg.go.dev/k8s.io/api/core/v1#VolumeMount) |describes a mounting of a Volume within a container.| Yes | - | +|| [VolumeSource](https://pkg.go.dev/k8s.io/api/core/v1#VolumeSource) | VolumeSource represents the location and type of the mounted volume. | Yes | - | + +## Metadata + +| annotations | map\[string\]string | Additionnal annotation to merge to the pod associated [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set). |No|nil| +| nodeLabels | map\[string\]string | Additionnal labels to merge to the pod associated [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). 
|No|nil| diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/4_node.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/4_node.md new file mode 100644 index 0000000000..63fab4f388 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/4_node.md @@ -0,0 +1,60 @@ +--- +id: 4_node +title: Node +sidebar_label: Node +--- + +Node defines the nifi node basic configuration + +```yaml + - id: 0 + # nodeConfigGroup can be used to ease the node configuration, if set only the id is required + nodeConfigGroup: "default_group" + # readOnlyConfig can be used to pass Nifi node config + # which has type read-only these config changes will trigger rolling upgrade + readOnlyConfig: + nifiProperties: + overrideConfigs: | + nifi.ui.banner.text=NiFiKop - Node 0 + # node configuration +# nodeConfig: + - id: 2 + # readOnlyConfig can be used to pass Nifi node config + # which has type read-only these config changes will trigger rolling upgrade + readOnlyConfig: + overrideConfigs: | + nifi.ui.banner.text=NiFiKop - Node 2 + # node configuration + nodeConfig: + resourcesRequirements: + limits: + cpu: "2" + memory: 3Gi + requests: + cpu: "1" + memory: 1Gi + storageConfigs: + # Name of the storage config, used to name PV to reuse into sidecars for example. + - name: provenance-repository + # Path where the volume will be mount into the main nifi container inside the pod. + mountPath: "/opt/nifi/provenance_repository" + # Kubernetes PVC spec + # https://kubernetes.io/docs/tasks/configure-pod-container/configure-persistent-volume-storage/#create-a-persistentvolumeclaim + pvcSpec: + accessModes: + - ReadWriteOnce + storageClassName: "standard" + resources: + requests: + storage: 8Gi +``` + +## Node + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|id|int32| unique Node id. 
|Yes| - | +|nodeConfigGroup|string| can be used to ease the node configuration, if set only the id is required |No| "" | +|readOnlyConfig|[ReadOnlyConfig](./2_read_only_config.md)| readOnlyConfig can be used to pass Nifi node config which has type read-only these config changes will trigger rolling upgrade.| No | nil | +|nodeConfig|[NodeConfig](./3_node_config.md)| node configuration. |No| nil | + diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/5_node_state.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/5_node_state.md new file mode 100644 index 0000000000..9b04fc0195 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/5_node_state.md @@ -0,0 +1,73 @@ +--- +id: 5_node_state +title: Node state +sidebar_label: Node state +--- + +Holds information about nifi state + +## NodeState + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|gracefulActionState|[GracefulActionState](#gracefulactionstate)| holds info about nifi cluster action status.| - | - | +|configurationState|[ConfigurationState](#configurationstate)| holds info about the config.| - | - | +|initClusterNode|[InitClusterNode](#initclusternode)| contains if this nodes was part of the initial cluster.| - | - | +|podIsReady|bool| True if the pod for this node is up and running. Otherwise false.| - | - | +|creationTime|[v1.Time](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Time)| The time at which this node was created and added to the cluster| - | - | + + +## GracefulActionState + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|errorMessage|string| holds the information what happened with Nifi Cluster. 
| - | "" | +|actionStep|[ActionStep](#actionstep)| holds info about the action step ran.| No | nil | +|taskStarted|string| hold the time when the execution started.| No | "" | +|actionState|[State](#state)| holds the information about Action state.| No | nil | + +## ConfigurationState + +|Name|Value|Description| +|-----|----|------------| +|ConfigInSync|ConfigInSync|states that the generated nodeConfig is in sync with the Node| +|ConfigOutOfSync|ConfigOutOfSync|states that the generated nodeConfig is out of sync with the Node| + +## InitClusterNode + +|Name|Value|Description| +|-----|----|------------| +|IsInitClusterNode|true|states the node is part of initial cluster setup| +|NotInitClusterNode|false|states the node is not part of initial cluster setup| + +## State + +### Upscale + +|Name|Value|Description| +|-----|----|------------| +|GracefulUpscaleRequired|GracefulUpscaleRequired|states that a node upscale is required.| +|GracefulUpscaleRunning|GracefulUpscaleRunning|states that the node upscale task is still running.| +|GracefulUpscaleSucceeded|GracefulUpscaleSucceeded|states the node is updated gracefully.| + +### Downscale + +|Name|Value|Description| +|-----|----|------------| +|GracefulDownscaleRequired|GracefulDownscaleRequired|states that a node downscale is required| +|GracefulDownscaleRunning|GracefulDownscaleRunning|states that the node downscale is still running in| +|GracefulUpscaleSucceeded|GracefulUpscaleSucceeded|states the node is updated gracefully| + +## ActionStep +|Name|Value|Description| +|-----|----|------------| +|DisconnectNodeAction|DISCONNECTING|states that the NiFi node is disconnecting from NiFi Cluster.| +|DisconnectStatus|DISCONNECTED|states that the NiFi node is disconnected from NiFi Cluster.| +|OffloadNodeAction|OFFLOADING|states that the NiFi node is offloading data to NiFi Cluster.| +|OffloadStatus|OFFLOADED|states that the NiFi node offloaded data to NiFi Cluster.| +|RemovePodAction|POD_REMOVING|states that the NiFi node pod 
and object related are removing by operator.| +|RemovePodStatus|POD_REMOVED|states that the NiFi node pod and object related have been removed by operator.| +|RemoveNodeAction|REMOVING|states that the NiFi node is removing from NiFi Cluster.| +|RemoveStatus|REMOVED|states that the NiFi node is removed from NiFi Cluster.| +|ConnectNodeAction|CONNECTING|states that the NiFi node is connecting to the NiFi Cluster.| +|ConnectStatus|CONNECTED|states that the NiFi node is connected to the NiFi Cluster.| \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/6_listeners_config.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/6_listeners_config.md new file mode 100644 index 0000000000..378d525b35 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/6_listeners_config.md @@ -0,0 +1,56 @@ +--- +id: 6_listeners_config +title: Listeners Config +sidebar_label: Listeners Config +--- + +ListenersConfig defines the Nifi listener types : + +```yaml + listenersConfig: + internalListeners: + - type: "https" + name: "https" + containerPort: 8443 + - type: "cluster" + name: "cluster" + containerPort: 6007 + - type: "s2s" + name: "s2s" + containerPort: 10000 + - type: "prometheus" + name: "prometheus" + containerPort: 9090 + sslSecrets: + tlsSecretName: "test-nifikop" + create: true +``` + +## ListenersConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|internalListeners|\[ \][InternalListener](#internallistener)| specifies settings required to access nifi internally.| Yes | - | +|sslSecrets|[SSLSecrets](#sslsecrets)| contains information about ssl related kubernetes secrets if one of the listener setting type set to ssl these fields must be populated to.| Yes | nil | +|clusterDomain|string| allow to override the default cluster domain which is "cluster.local".| Yes | `cluster.local` | +|useExternalDNS|string| allow to 
manage externalDNS usage by limiting the DNS names associated to each nodes and load balancer: `-node-...`| Yes | false | + +## InternalListener + +Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|type|enum{ "cluster", "http", "https", "s2s", "prometheus"}| allow to specify if we are in a specific nifi listener it's allowing to define some required information such as Cluster Port, Http Port, Https Port, S2S or Prometheus port| Yes | - | +|name|string| an identifier for the port which will be configured. | Yes | - | +|containerPort|int32| the containerPort. | Yes | - | + + +## SSLSecrets + +Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|tlsSecretName|string| should contain all ssl certs required by nifi including: caCert, caKey, clientCert, clientKey serverCert, serverKey, peerCert, peerKey. | Yes | - | +|create|boolean| tells the installed cert manager to create the required certs keys. | Yes | - | +|clusterScoped|boolean| defines if the Issuer created is cluster or namespace scoped. 
| Yes | - | +|issuerRef|[ObjectReference](https://docs.cert-manager.io/en/release-0.9/reference/api-docs/index.html#objectreference-v1alpha1)| IssuerRef allow to use an existing issuer to act as CA: https://cert-manager.io/docs/concepts/issuer/ | No | - | +|pkiBackend|enum{"cert-manager"}| | Yes | - | + diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/7_external_service_config.md b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/7_external_service_config.md new file mode 100644 index 0000000000..1e62f13136 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/1_nifi_cluster/7_external_service_config.md @@ -0,0 +1,56 @@ +--- +id: 7_external_service_config +title: External Service Config +sidebar_label: External Service Config +--- + +ListenersConfig defines the Nifi listener types : + +```yaml + externalServices: + - name: "clusterip" + spec: + type: ClusterIP + portConfigs: + - port: 8080 + internalListenerName: "http" + metadata: + annotations: + toto: tata + labels: + titi: tutu +``` + +## ExternalServiceConfig + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string| must be unique within a namespace. 
Name is primarily intended for creation idempotence and configuration.| Yes | - | +|metadata|[Metadata](#metadata)|define additionnal metadata to merge to the service associated.| No | - | +|spec|[ExternalServiceSpec](#externalservicespec)| defines the behavior of a service.| Yes | | + +## ExternalServiceSpec + +Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|portConfigs||\[ \][PortConfig](#portconfig)| Contains the list port for the service and the associated listener| Yes | - | +|clusterIP|string| More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies | No | - | +|type|[ServiceType](https://godoc.org/k8s.io/api/core/v1#ServiceType)| type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. | No | - | +|externalIPs|\[ \]string| externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes | No | - | +|loadBalancerIP|string| Only applies to Service Type: LoadBalancer. LoadBalancer will get created with the IP specified in this field. | No | - | +|loadBalancerSourceRanges|\[ \]string| If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs | No | - | +|externalName|string| externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. | No | - | + +## PortConfig + +Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|port|int32| The port that will be exposed by this service. | Yes | - | +|internalListenerName| string| The name of the listener which will be used as target container. 
| Yes | - | + +## Metadata + +Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +| annotations | map\[string\]string | Additionnal annotation to merge to the service associated [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set). |No|nil| +| nodeLabels | map\[string\]string | Additionnal labels to merge to the service associated [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set). |No|nil| diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/2_nifi_user.md b/site/website/versioned_docs/version-v0.13.0/5_references/2_nifi_user.md new file mode 100644 index 0000000000..6030e318d6 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/2_nifi_user.md @@ -0,0 +1,101 @@ +--- +id: 2_nifi_user +title: NiFi User +sidebar_label: NiFi User +--- + +`NifiUser` is the Schema for the nifi users API. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiUser +metadata: + name: aguitton +spec: + identity: alexandre.guitton@orange.com + clusterRef: + name: nc + namespace: nifikop + createCert: false +``` + +## NifiUser +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects users must create.|No|nil| +|spec|[NifiUserSpec](#nifiuserspec)|defines the desired state of NifiUser.|No|nil| +|status|[NifiUserStatus](#nifiuserstatus)|defines the observed state of NifiUser.|No|nil| + +## NifiUserSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|identity|string| used to define the user identity on NiFi cluster side, when the user's name doesn't suit with Kubernetes resource name. 
|No| - | +|secretName|string| name of the secret where all cert resources will be stored. |No| - | +|clusterRef|[ClusterReference](#clusterreference)| contains the reference to the NifiCluster with the one the user is linked. |Yes| - | +|DNSNames|\[ \]string| list of DNSNames that the user will used to request the NifiCluster (allowing to create the right certificates associated). |Yes| - | +|includeJKS|boolean| whether or not the the operator also include a Java keystore format (JKS) with you secret. |Yes| - | +|createCert|boolean| whether or not a certificate will be created for this user. |No| - | +|accessPolicies|\[ \][AccessPolicy](#accesspolicy)| defines the list of access policies that will be granted to the group. |No| [] | + + +## NifiUserStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|id|string| the nifi user's node id.|Yes| - | +|version|string| the last nifi user's node revision version catched.|Yes| - | + +## ClusterReference + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string| name of the NifiCluster. |Yes| - | +|namespace|string| the NifiCluster namespace location. |Yes| - | + +## AccessPolicy + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|type|[AccessPolicyType](#accesspolicytype)| defines the kind of access policy, could be "global" or "component". |Yes| - | +|action|[AccessPolicyAction](#accesspolicyaction)| defines the kind of action that will be granted, could be "read" or "write". |Yes| - | +|resource|[AccessPolicyResource](#accesspolicyresource)| defines the kind of resource targeted by this access policies, please refer to the following page : https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#access-policies |Yes| - | +|componentType|string| used if the type is "component", it allows to define the kind of component on which is the access policy. 
|No| - | +|componentId|string| used if the type is "component", it allows to define the id of the component on which is the access policy. |No| - | + +## AccessPolicyType + +|Name|Value|Description| +|-----|----|------------| +|GlobalAccessPolicyType|global|Global access policies govern the following system level authorizations| +|ComponentAccessPolicyType|component|Component level access policies govern the following component level authorizations| + +## AccessPolicyAction + +|Name|Value|Description| +|-----|----|------------| +|ReadAccessPolicyAction|read|Allows users to view| +|WriteAccessPolicyAction|write|Allows users to modify| + +## AccessPolicyResource + +|Name|Value|Description| +|-----|----|------------| +|FlowAccessPolicyResource|/flow|About the UI| +|ControllerAccessPolicyResource|/controller| about the controller including Reporting Tasks, Controller Services, Parameter Contexts and Nodes in the Cluster| +|ParameterContextAccessPolicyResource|/parameter-context|About the Parameter Contexts. Access to Parameter Contexts are inherited from the "access the controller" policies unless overridden.| +|ProvenanceAccessPolicyResource|/provenance|Allows users to submit a Provenance Search and request Event Lineage| +|RestrictedComponentsAccessPolicyResource|/restricted-components|About the restricted components assuming other permissions are sufficient. The restricted components may indicate which specific permissions are required. Permissions can be granted for specific restrictions or be granted regardless of restrictions. 
If permission is granted regardless of restrictions, the user can create/modify all restricted components.| +|PoliciesAccessPolicyResource|/policies|About the policies for all components| +|TenantsAccessPolicyResource|/tenants| About the users and user groups| +|SiteToSiteAccessPolicyResource|/site-to-site|Allows other NiFi instances to retrieve Site-To-Site details| +|SystemAccessPolicyResource|/system|Allows users to view System Diagnostics| +|ProxyAccessPolicyResource|/proxy|Allows proxy machines to send requests on the behalf of others| +|CountersAccessPolicyResource|/counters|About counters| +|ComponentsAccessPolicyResource|/| About the component configuration details| +|OperationAccessPolicyResource|/operation|to operate components by changing component run status (start/stop/enable/disable), remote port transmission status, or terminating processor threads| +|ProvenanceDataAccessPolicyResource|/provenance-data|to view provenance events generated by this component| +|DataAccessPolicyResource|/data|About metadata and content for this component in flowfile queues in outbound connections and through provenance events| +|PoliciesComponentAccessPolicyResource|/policies|-| +|DataTransferAccessPolicyResource|/data-transfer|Allows a port to receive data from NiFi instances| + diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/3_nifi_registry_client.md b/site/website/versioned_docs/version-v0.13.0/5_references/3_nifi_registry_client.md new file mode 100644 index 0000000000..60e3365f99 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/3_nifi_registry_client.md @@ -0,0 +1,42 @@ +--- +id: 3_nifi_registry_client +title: NiFi Registry Client +sidebar_label: NiFi Registry Client +--- + +`NifiRegistryClient` is the Schema for the NiFi registry client API. 
+ +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiRegistryClient +metadata: + name: squidflow +spec: + clusterRef: + name: nc + namespace: nifikop + description: "Squidflow demo" + uri: "http://nifi-registry:18080" +``` + +## NifiRegistryClient +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects registry clients must create.|No|nil| +|spec|[NifiRegistryClientSpec](#nifiregistryclientspec)|defines the desired state of NifiRegistryClient.|No|nil| +|status|[NifiRegistryClientStatus](#nifiregistryclientstatus)|defines the observed state of NifiRegistryClient.|No|nil| + +## NifiRegistryClientsSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|description|string| describes the Registry client. |No| - | +|uri|string| URI of the NiFi registry that should be used for pulling the flow. |Yes| - | +|clusterRef|[ClusterReference](./2_nifi_user.md#clusterreference)| contains the reference to the NifiCluster with the one the user is linked. |Yes| - | + +## NifiRegistryClientStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|id|string| nifi registry client's id. |Yes| - | +|version|int64| the last nifi registry client revision version catched. 
|Yes| - | \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/4_nifi_parameter_context.md b/site/website/versioned_docs/version-v0.13.0/5_references/4_nifi_parameter_context.md new file mode 100644 index 0000000000..253c418d9c --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/4_nifi_parameter_context.md @@ -0,0 +1,86 @@ +--- +id: 4_nifi_parameter_context +title: NiFi Parameter Context +sidebar_label: NiFi Parameter Context +--- + +`NifiParameterContext` is the Schema for the NiFi parameter context API. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiParameterContext +metadata: + name: dataflow-lifecycle +spec: + description: "It is a test" + clusterRef: + name: nc + namespace: nifikop + secretRefs: + - name: secret-params + namespace: nifikop + parameters: + - name: test + value: toto + description: tutu + - name: test2 + description: toto + sensistive: true +``` + +## NifiParameterContext + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects parameter contexts must create.|No|nil| +|spec|[NifiParameterContextSpec](#NifiParameterContextspec)|defines the desired state of NifiParameterContext.|No|nil| +|status|[NifiParameterContextStatus](#NifiParameterContextstatus)|defines the observed state of NifiParameterContext.|No|nil| + +## NifiParameterContextsSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|description|string| describes the Parameter Context. |No| - | +|parameters|\[ \][Parameter](#parameter)| a list of non-sensitive Parameters. 
|Yes| - | +|secretRefs|\[ \][SecretReference](#secretreference)| a list of secret containing sensitive parameters (the key will name of the parameter) |No| - | +|clusterRef|[ClusterReference](./2_nifi_user.md#clusterreference)| contains the reference to the NifiCluster with the one the user is linked. |Yes| - | +|disableTakeOver|bool| whether or not the operator should take over an existing parameter context if its name is the same. |No| - | + +## NifiParameterContextStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|id|string| nifi parameter context's id. |Yes| - | +|version|int64| the last nifi parameter context revision version catched. |Yes| - | +|latestUpdateRequest|[ParameterContextUpdateRequest](#parametercontextupdaterequest)|the latest update request. |Yes| - | +|version|int64| the last nifi parameter context revision version catched. |Yes| - | + +## Parameter + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string| the name of the Parameter. |Yes| - | +|value|string| the value of the Parameter. |No| - | +|description|string| the description of the Parameter. |No| - | +|sensitive|string| Whether the parameter is sensitive or not. |No| false | + +## SecretReference + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string| name of the secret. |Yes| - | +|namespace|string| the secret namespace location. |Yes| - | + + +## ParameterContextUpdateRequest + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|id|string| the id of the update request. |Yes| - | +|uri|string| the uri for this request. |Yes| - | +|submissionTime|string| the timestamp of when the request was submitted This property is read only. |Yes| - | +|lastUpdated|string| the timestamp of when the request was submitted This property is read only. |Yes| - | +|complete|bool| whether or not this request has completed. 
|Yes| false | +|failureReason|string| an explication of why the request failed, or null if this request has not failed. |Yes| - | +|percentCompleted|int32| the percentage complete of the request, between 0 and 100. |Yes| - | +|state|string| the state of the request. |Yes| - | \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/5_nifi_dataflow.md b/site/website/versioned_docs/version-v0.13.0/5_references/5_nifi_dataflow.md new file mode 100644 index 0000000000..ee3b54b5d5 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/5_nifi_dataflow.md @@ -0,0 +1,136 @@ +--- +id: 5_nifi_dataflow +title: NiFi Dataflow +sidebar_label: NiFi Dataflow +--- + +`NifiDataflow` is the Schema for the NiFi dataflow API. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiDataflow +metadata: + name: dataflow-lifecycle +spec: + parentProcessGroupID: "16cfd2ec-0174-1000-0000-00004b9b35cc" + bucketId: "01ced6cc-0378-4893-9403-f6c70d080d4f" + flowId: "9b2fb465-fb45-49e7-94fe-45b16b642ac9" + flowVersion: 2 + flowPosition: + posX: 0 + posY: 0 + syncMode: always + skipInvalidControllerService: true + skipInvalidComponent: true + clusterRef: + name: nc + namespace: nifikop + registryClientRef: + name: squidflow + namespace: nifikop + parameterContextRef: + name: dataflow-lifecycle + namespace: nifikop + updateStrategy: drain +``` + +## NifiDataflow + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects dataflows must create.|No|nil| +|spec|[NifiDataflowSpec](#NifiDataflowspec)|defines the desired state of NifiDataflow.|No|nil| +|status|[NifiDataflowStatus](#NifiDataflowstatus)|defines the observed state of NifiDataflow.|No|nil| + + +## NifiDataflowsSpec + +|Field|Type|Description|Required|Default| 
+|-----|----|-----------|--------|--------| +|parentProcessGroupID|string|the UUID of the parent process group where you want to deploy your dataflow, if not set deploy at root level. |No| - | +|bucketId|string|the UUID of the Bucket containing the flow. |Yes| - | +|flowId|string|the UUID of the flow to run. |Yes| - | +|flowVersion|*int32|the version of the flow to run. |Yes| - | +|flowPosition|[FlowPosition](#flowposition)|the position of your dataflow in the canvas. |No| - | +|syncMode|Enum={"never","always","once"}|if the flow will be synchronized once, continuously or never. |No| always | +|skipInvalidControllerService|bool|whether the flow is considered as ran if some controller services are still invalid or not. |Yes| false | +|skipInvalidComponent|bool|whether the flow is considered as ran if some components are still invalid or not. |Yes| false | +|updateStrategy|[DataflowUpdateStrategy](#dataflowupdatestrategy)|describes the way the operator will deal with data when a dataflow will be updated : Drop or Drain |Yes| drain | +|clusterRef|[ClusterReference](./2_nifi_user.md#clusterreference)| contains the reference to the NifiCluster with the one the user is linked. |Yes| - | +|parameterContextRef|[ParameterContextReference](./4_nifi_parameter_context.md#parametercontextreference)| contains the reference to the ParameterContext with the one the dataflow is linked. |No| - | +|registryClientRef|[RegistryClientReference](./3_nifi_registry_client.md#registryclientreference)| contains the reference to the NifiRegistry with the one the dataflow is linked. |Yes| - | + +## NifiDataflowStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|processGroupID|string| process Group ID. |Yes| - | +|state|[DataflowState](#dataflowstate)| the dataflow current state. |Yes| - | +|latestUpdateRequest|[UpdateRequest](#updaterequest)|the latest update request sent. 
|Yes| - | +|latestDropRequest|[DropRequest](#droprequest)|the latest queue drop request sent. |Yes| - | + +## DataflowUpdateStrategy + +|Name|Value|Description| +|-----|----|------------| +|DrainStrategy|drain|leads to shutting down only input components (Input processors, remote input process group) and dropping all flowfiles from the flow.| +|DropStrategy|drop|leads to shutting down all components and dropping all flowfiles from the flow.| + +## DataflowState + +|Name|Value|Description| +|-----|----|------------| +|DataflowStateCreated|Created|describes the status of a NifiDataflow as created.| +|DataflowStateStarting|Starting|describes the status of a NifiDataflow as starting.| +|DataflowStateRan|Ran|describes the status of a NifiDataflow as running.| +|DataflowStateOutOfSync|OutOfSync|describes the status of a NifiDataflow as out of sync.| +|DataflowStateInSync|InSync|describes the status of a NifiDataflow as in sync.| + +## UpdateRequest + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|type|[DataflowUpdateRequestType](#dataflowupdaterequesttype)|defines the type of versioned flow update request. |Yes| - | +|id|string|the id of the update request. |Yes| - | +|uri|string|the uri for this request. |Yes| - | +|lastUpdated|string|the last time this request was updated. |Yes| - | +|complete|bool| whether or not this request has completed. |Yes| false | +|failureReason|string| an explication of why the request failed, or null if this request has not failed. |Yes| - | +|percentCompleted|int32| the percentage complete of the request, between 0 and 100. |Yes| 0 | +|state|string| the state of the request. |Yes| - | + +## DropRequest + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|connectionId|string|the connection id. |Yes| - | +|id|string|the id for this drop request. |Yes| - | +|uri|string|the uri for this request. |Yes| - | +|lastUpdated|string|the last time this request was updated. 
|Yes| - | +|finished|bool|whether the request has finished. |Yes| false | +|failureReason|string|an explanation of why the request failed, or null if this request has not failed. |Yes| - | +|percentCompleted|int32|the percentage complete of the request, between 0 and 100. |Yes| 0 | +|currentCount|int32|the number of flow files currently queued. |Yes| 0 | +|currentSize|int64| the size of flow files currently queued in bytes. |Yes| 0 | +|current|string|the count and size of flow files currently queued. |Yes| - | +|originalCount|int32|the number of flow files to be dropped as a result of this request. |Yes| 0 | +|originalSize|int64| the size of flow files to be dropped as a result of this request in bytes. |Yes| 0 | +|original|string|the count and size of flow files to be dropped as a result of this request. |Yes| - | +|droppedCount|int32|the number of flow files that have been dropped thus far. |Yes| 0 | +|droppedSize|int64| the size of flow files that have been dropped thus far in bytes. |Yes| 0 | +|Dropped|string|the count and size of flow files that have been dropped thus far. |Yes| - | +|state|string|the state of the request. |Yes| - | + +## DataflowUpdateRequestType + +|Name|Value|Description| +|-----|----|------------| +|RevertRequestType|Revert|defines a revert changes request.| +|UpdateRequestType|Update|defines an update version request.| + +## FlowPosition + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|posX|int64|the x coordinate. |No| - | +|posY|int64|the y coordinate. 
|No| - | \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/6_nifi_usergroup.md b/site/website/versioned_docs/version-v0.13.0/5_references/6_nifi_usergroup.md new file mode 100644 index 0000000000..5ad51bbec5 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/6_nifi_usergroup.md @@ -0,0 +1,55 @@ +--- +id: 6_nifi_usergroup +title: NiFi UserGroup +sidebar_label: NiFi UserGroup +--- + +`NifiUserGroup` is the Schema for the nifi user groups API. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiUserGroup +metadata: + name: group-test +spec: + clusterRef: + name: nc + namespace: nifikop + usersRef: + - name: nc-0-node.nc-headless.nifikop.svc.cluster.local + - name: nc-controller.nifikop.mgt.cluster.local + accessPolicies: + - type: global + action: read + resource: /counters +``` + +## NifiUser +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects usergroups must create.|No|nil| +|spec|[NifiUserGroupSpec](#nifiusergroupspec)|defines the desired state of NifiUserGroup.|No|nil| +|status|[NifiUserGroupStatus](#nifiusergroupstatus)|defines the observed state of NifiUserGroup.|No|nil| + +## NifiUserGroupSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|clusterRef|[ClusterReference](./2_nifi_user.md#clusterreference)| contains the reference to the NifiCluster with the one the user is linked. |Yes| - | +|usersRef|\[ \][UserReference](#userref)| contains the list of reference to NifiUsers that are part to the group. |No| [] | +|accessPolicies|\[ \][AccessPolicy](./2_nifi_user.md#accesspolicy)| defines the list of access policies that will be granted to the group. 
|No| [] | + +## NifiUserGroupStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|id|string| the nifi usergroup's node id.|Yes| - | +|version|string| the last nifi usergroup's node revision version catched.|Yes| - | + +## UserReference + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|name|string| name of the NifiUser. |Yes| - | +|namespace|string| the NifiUser namespace location. |Yes| - | + diff --git a/site/website/versioned_docs/version-v0.13.0/5_references/7_nifi_nodegroup_autoscaler.md b/site/website/versioned_docs/version-v0.13.0/5_references/7_nifi_nodegroup_autoscaler.md new file mode 100644 index 0000000000..7faf3f5445 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/5_references/7_nifi_nodegroup_autoscaler.md @@ -0,0 +1,59 @@ +--- +id: 7_nifi_nodegroup_autoscaler +title: NiFi NodeGroup Autoscaler +sidebar_label: NiFi NodeGroup Autoscaler +--- + +`NifiNodeGroupAutoscaler` is the Schema through which you configure automatic scaling of `NifiCluster` deployments. + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiNodeGroupAutoscaler +metadata: + name: nifinodegroupautoscaler-sample +spec: + # contains the reference to the NifiCluster with the one the node group autoscaler is linked. 
+ clusterRef: + name: nificluster-name + namespace: nifikop + # defines the id of the NodeConfig contained in NifiCluster.Spec.NodeConfigGroups + nodeConfigGroupId: default-node-group + # The selector used to identify nodes in NifiCluster.Spec.Nodes this autoscaler will manage + # Use Node.Labels in combination with this selector to clearly define which nodes will be managed by this autoscaler + nodeLabelsSelector: + matchLabels: + nifi_cr: nificluster-name + nifi_node_group: default-node-group + # the strategy used to decide how to add nodes to a nifi cluster + upscaleStrategy: simple + # the strategy used to decide how to remove nodes from an existing cluster + downscaleStrategy: lifo +``` + +## NifiNodeGroupAutoscaler +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects nodegroupautoscalers must create.|No|nil| +|spec|[NifiNodeGroupAutoscalerSpec](#nifinodegroupautoscalerspec)|defines the desired state of NifiNodeGroupAutoscaler.|No|nil| +|status|[NifiNodeGroupAutoscalerStatus](#nifinodegroupautoscalerstatus)|defines the observed state of NifiNodeGroupAutoscaler.|No|nil| + +## NifiNodeGroupAutoscalerSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|clusterRef|[ClusterReference](./2_nifi_user.md#clusterreference)| contains the reference to the NifiCluster containing the node group this autoscaler should manage. |Yes| - | +|nodeConfigGroupId| string | defines the id of the [NodeConfig](./1_nifi_cluster/3_node_config.md) contained in `NifiCluster.Spec.NodeConfigGroups`. |Yes| - | +|nodeLabelsSelector|[LabelSelector](https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#LabelSelector)| defines the set of labels used to identify nodes in a `NifiCluster` node config group. 
Use `Node.Labels` in combination with this selector to clearly define which nodes will be managed by this autoscaler. Take care to avoid having mutliple autoscalers managing the same nodes. |Yes| - | +|readOnlyConfig| [ReadOnlyConfig](./1_nifi_cluster/2_read_only_config.md) | defines a readOnlyConfig to apply to each node in this node group. Any settings here will override those set in the configured `nodeConfigGroupId`. |Yes| - | +|nodeConfig| [NodeConfig](./1_nifi_cluster/3_node_config.md) | defines a nodeConfig to apply to each node in this node group. Any settings here will override those set in the configured `nodeConfigGroupId`. |Yes| - | +|upscaleStrategy| string | The strategy NiFiKop will use to scale up the nodes managed by this autoscaler. Must be one of {`simple`}. |Yes| - | +|downscaleStrategy| string | The strategy NiFiKop will use to scale down the nodes managed by this autoscaler. Must be one of {`lifo`}. |Yes| - | +|replicas| int | the initial number of replicas to configure the `HorizontalPodAutoscaler` with. After the initial configuration, this `replicas` configuration will be automatically updated by the Kubernetes `HorizontalPodAutoscaler` controller. |No| 1 | + +## NifiNodeGroupAutoscalerStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|--------| +|state|string| the state of the nodegroup autoscaler. This is set by the autoscaler. |No| - | +|replicas|int| the current number of replicas running in the node group this autoscaler is managing. This is set by the autoscaler.|No| - | +|selector|string| the [selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) used by the `HorizontalPodAutoscaler` controller to identify the replicas in this node group. 
This is set by the autoscaler.|No| - | \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/6_contributing/0_contribution_organization.md b/site/website/versioned_docs/version-v0.13.0/6_contributing/0_contribution_organization.md new file mode 100644 index 0000000000..59d6c9afa8 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/6_contributing/0_contribution_organization.md @@ -0,0 +1,66 @@ +--- +id: 0_contribution_organization +title: Contribution organization +sidebar_label: Contribution organization +--- + +## New ownership for more community oriented + +The NiFiKop operator was originally started by Orange in March 2020 as [Orange-OpenSource/nifikop](https://github.com/Orange-OpenSource/nifikop), +and then forked as `konpyutaika/nifikop` in March 2022: but this is the same codebase and the same developers. + +We made this decision in concert with the Orange team, because some legal restrictions would not have allowed us to involve and serve the external community around this operator efficiently. +Therefore, we have chosen to fork the source code into another organization and repository, which will allow a more open ownership and community-oriented development. + +It is important to notice that Orange will still continue to work and contribute to the operator, but as part of the community :) + +## Organizations + +With this ownership move, we decided to set up a new project management, with the aim of being more and more community-oriented + +### Slack channel + +One of the most important topics we want to improve is probably the communication around the operator's development. 
+To achieve this, we have created a new Slack workspace open to anyone who wants [to join](https://join.slack.com/t/konpytika/shared_invite/zt-14md072lv-Jr8mqYoeUrqzfZF~YGUpXA), +with two main channels: + +- [#nifikop-news](https://konpytika.slack.com/archives/C035FHN1MNG): There we will announce each new release, and communicate about next objectives for the operator. +- [#nifikop-discussion](https://konpytika.slack.com/archives/C035X6KP684): Direct discussion between each member of the community to design new needs, fix issues and help each other. + +### Tech scoping + +As we want to involve as much as possible the people on the operator, we will introduce a new medium for brainstorming and designing new major features. + +This is the Tech Scoping, whose main objective is to describe the problem statement that we are trying to solve, +the different approaches that could solve it, and together discuss and challenge them to define the solution to be implemented. + +You can find all the tech scoping in this [Google Drive repository](https://drive.google.com/drive/folders/1-A__UxEdRBZrwEUJu4lMF4LJtIstrnT0?usp=sharing) + +### Teams + +#### NiFiKop Leads + +This group is currently composed of : + +- [Alexandre Guitton](https://github.com/erdrix) as original owner and developer of the operator +- [Julien Guitton](https://github.com/juldrixx) as representative of Orange's contribution + +The main objectives of this group are to : + +- Define the global roadmap of the operator, +- Ensure the reviews and validations of the PRs, +- Review and validate the Tech Scoping. + +This group aims to be more representative of the community, so if the operator community grows or if there is a need, we would be happy to have more people in this group :) + +#### NiFiKop Contributors + +This group is currently composed of : + +The main objectives of this group are to : + +- Manage issues to help people, +- Review PRs (not validation), +- Create and edit Tech Scoping for new features. 
+ +This is an open group, so feel free to contact a NiFiKop Leader on Slack to join :) \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/6_contributing/1_developer_guide.md b/site/website/versioned_docs/version-v0.13.0/6_contributing/1_developer_guide.md new file mode 100644 index 0000000000..e494a8fb47 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/6_contributing/1_developer_guide.md @@ -0,0 +1,144 @@ +--- +id: 1_developer_guide +title: Developer guide +sidebar_label: Developer guide +--- + +## Operator SDK + +### Prerequisites + +NiFiKop has been validated with : + +- [go](https://golang.org/doc/install) version v1.17+. +- [docker](https://docs.docker.com/get-docker/) version 18.09+ +- [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) version v1.16+ +- [Helm](https://helm.sh/) version v3.4.2 +- [Operator sdk](https://github.com/operator-framework/operator-sdk) version v1.18.1 + +### Initial setup + +Checkout the project. + +```bash +git clone https://github.com/konpyutaika/nifikop.git +cd nifikop +``` + +### Operator sdk + +The full list of commands is available here: https://sdk.operatorframework.io/docs/upgrading-sdk-version/v1.0.0/#cli-changes + +### Build NiFiKop + +#### Local environment + +If you prefer working directly with your local go environment you can simply use: + +```bash +make build +``` + +### Run NiFiKop + +We can quickly run NiFiKop in development mode (on your local host), then it will use your kubectl configuration file to connect to your kubernetes cluster. + +There are several ways to execute your operator : + +- Using your IDE directly +- Executing directly the Go binary +- Deploying using the Helm charts + +If you want to configure your development IDE, you need to give it the environment variables it will use to connect to Kubernetes. 
+ +```bash +KUBECONFIG={path/to/your/kubeconfig} +WATCH_NAMESPACE={namespace_to_watch} +POD_NAME={name for operator pod} +LOG_LEVEL=Debug +OPERATOR_NAME=ide +``` + +#### Run the Operator Locally with the Go Binary + +This method can be used to run the operator locally outside of the cluster. This method may be preferred during development as it facilitates faster deployment and testing. + +Set the name of the operator in an environment variable. + +```bash +export OPERATOR_NAME=nifi-operator +``` + +Deploy the CRDs. + +```bash +kubectl apply -f config/crd/bases/nifi.konpyutaika.com_nificlusters.yaml +kubectl apply -f config/crd/bases/nifi.konpyutaika.com_nifidataflows.yaml +kubectl apply -f config/crd/bases/nifi.konpyutaika.com_nifiparametercontexts.yaml +kubectl apply -f config/crd/bases/nifi.konpyutaika.com_nifiregistryclients.yaml +kubectl apply -f config/crd/bases/nifi.konpyutaika.com_nifiusergroups.yaml +kubectl apply -f config/crd/bases/nifi.konpyutaika.com_nifiusers.yaml +``` + +And deploy the operator. + +```bash +make run +``` + +This will run the operator in the `default` namespace using the default Kubernetes config file at `$HOME/.kube/config`. + +#### Deploy using the Helm Charts + +This section provides instructions for running the operator Helm charts with an image that is built from the local branch. + +Build the image from the current branch. + +```bash +export DOCKER_REPO_BASE={your-docker-repo} +make docker-build +``` + +Push the image to Docker Hub (or to whichever repo you want to use). + +```bash +$ make docker-push +``` + +:::info +The image tag is a combination of the version as defined in `version/version.go` and the branch name. +::: + +Install the Helm chart. + +```bash +helm install skeleton ./helm/nifikop \ + --set image.tag=v0.5.1-release \ + --namespace nifikop +``` + +:::important +The `image.repository` and `image.tag` template variables have to match the names from the image that we pushed in the previous step. 
+::: + +:::info +We set the chart name to the branch, but it can be anything. +::: + +Lastly, verify that the operator is running. + +```console +$ kubectl get pods -n nifikop +NAME READY STATUS RESTARTS AGE +skeleton-nifikop-8946b89dc-4cfs9 1/1 Running 0 7m45s +``` + +## Helm + +The NiFiKop operator is released in the `konpyutaika-incubator` helm repository. + +In order to package the chart you need to run the following command. + +```bash +make helm-package +``` diff --git a/site/website/versioned_docs/version-v0.13.0/6_contributing/2_reporting_bugs.md b/site/website/versioned_docs/version-v0.13.0/6_contributing/2_reporting_bugs.md new file mode 100644 index 0000000000..376f0f76fc --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/6_contributing/2_reporting_bugs.md @@ -0,0 +1,25 @@ +--- +id: 2_reporting_bugs +title: Reporting bugs +sidebar_label: Reporting bugs +--- + +If any part of the NiFiKop project has bugs or documentation mistakes, please let us know by [opening an issue](https://github.com/konpyutaika/nifikop/issues/new). We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist. + +To make the bug report accurate and easy to understand, please try to create bug reports that are: + +- Specific. Include as much details as possible: which version, what environment, what configuration, etc. + +- Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce, please include the steps that might lead to the problem. + +- Isolated. Please try to isolate and reproduce the bug with minimum dependencies. It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on operator-sdk is out of scope, but we are happy to provide guidance in the right direction or help with using operator-sdk itself. 
+ +- Unique. Do not duplicate existing bug reports. + +- Scoped. One bug per report. Do not follow up with another bug inside one report. + +It may be worthwhile to read [Elika Etemad’s article on filing good bug reports][filing-good-bugs] before creating a bug report. + +We might ask for further information to locate a bug. A duplicated bug report will be closed. + +[filing-good-bugs]: http://fantasai.inkedblade.net/style/talks/filing-good-bugs/ \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/6_contributing/3_credits.md b/site/website/versioned_docs/version-v0.13.0/6_contributing/3_credits.md new file mode 100644 index 0000000000..e04adf7f4b --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/6_contributing/3_credits.md @@ -0,0 +1,11 @@ +--- +id: 3_credits +title: Credits +sidebar_label: Credits +--- + +This implementation is based on other Open-Source projects, and a lot of community ideas. Particular thanks to : + +- Operator implementation based on [banzaicloud/kafka-operator](https://github.com/banzaicloud/kafka-operator) +- NiFi kubernetes setup configuration inspired from [cetic/helm-nifi](https://github.com/cetic/helm-nifi) +- Implementation is based on [Operator SDK](https://github.com/operator-framework/operator-sdk) \ No newline at end of file diff --git a/site/website/versioned_docs/version-v0.13.0/7_upgrade/1_v0.7.x_to_v0.8.0.md b/site/website/versioned_docs/version-v0.13.0/7_upgrade/1_v0.7.x_to_v0.8.0.md new file mode 100644 index 0000000000..4cf1e08601 --- /dev/null +++ b/site/website/versioned_docs/version-v0.13.0/7_upgrade/1_v0.7.x_to_v0.8.0.md @@ -0,0 +1,165 @@ +--- +id: 1_v0.7.x_to_v0.8.0 +title: v0.7.x to v0.8.0 +sidebar_label: v0.7.x to v0.8.0 +--- + +Guide to migrate operator resources built using `nifi.orange.com/v1alpha1` to `nifi.konpyutaika.com/v1alpha1`. + +## Getting started + +The goal is to migrate your NiFiKop resources from the old CRDs to the new ones without any service interruption. 
+ +To do this, it is necessary to have both versions of CRDs available on Kubernetes and to have the old operator stopped (to prevent any manipulation on the resources). +Then launch the script developed in nodejs presented in the following. The script will copy the resources in the old CRDs to the new CRDs keeping only the relevant fields (labels, annotations, name and spec) and then copy the status. + +## Prerequisites + +- [nodejs](https://nodejs.org/en/download/) version 15.3.0+ +- [npm](https://docs.npmjs.com/cli/v7/configuring-npm/install) version 7.0.14+ + +## Initial setup + +Create a nodejs project and download the required dependencies: + +```bash +npm init -y +npm install @kubernetes/client-node@0.16.3 minimist@1.2.6 +``` + +In `package.json` add the following script: + +```json +"start": "node --no-warnings index.js" +``` + +Your `package.json` should look like that: + +```json +{ + "name": "nifikop_crd_migration", + "version": "1.0.0", + "description": "Script to migrate from the old CRDs to the new CRDs.", + "main": "index.js", + "scripts": { + "start": "node --no-warnings index.js", + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [ + "K8S", + "NiFiKop", + "CRDs" + ], + "license": "ISC", + "dependencies": { + "@kubernetes/client-node": "^0.16.3", + "minimist": "^1.2.6" + } +} +``` + +## Script setup + +Create the file `index.js` with the following content: + +```js +process.env['NODE_TLS_REJECT_UNAUTHORIZED'] = 0; +const k8s = require('@kubernetes/client-node'); + +const kc = new k8s.KubeConfig(); +kc.loadFromDefault(); + +const k8sApi = kc.makeApiClient(k8s.CustomObjectsApi); + +const KONPYUTAIKA_GROUP = 'nifi.konpyutaika.com'; +const KONPYUTAIKA_GROUP_VERSION = 'v1alpha1'; +const ORANGE_GROUP = 'nifi.orange.com'; +const ORANGE_GROUP_VERSION = 'v1alpha1'; + +const call = async (SRC_GRP, SRC_GRP_VER, DST_GRP, DST_GRP_VER, KIND_PLURAL, NAMESPACE) => { + console.log(`Listing ${KIND_PLURAL} of ${SRC_GRP}/${SRC_GRP_VER} in 
${NAMESPACE}...`); + const listResources = (await k8sApi.listNamespacedCustomObject(SRC_GRP, SRC_GRP_VER, NAMESPACE, KIND_PLURAL)).body.items; + return Promise.all(listResources.map(async (resource) => { + try { + console.log(`Found ${resource.kind} "${resource.metadata.name}" of ${resource.apiVersion} in ${NAMESPACE}`); + + if (resource.metadata.ownerReferences) { + console.log(`${resource.kind} ${resource.metadata.name} mananged by something else (ownerRefereces is set).`); + return; + } + + const bodyResource = { + apiVersion: `${DST_GRP}/${DST_GRP_VER}`, + kind: resource.kind, + metadata: { + name: resource.metadata.name, + annotations: resource.metadata.annotations, + labels: resource.metadata.labels + }, + spec: resource.spec + }; + + console.log(`Creating ${bodyResource.kind} "${bodyResource.metadata.name}" of ${bodyResource.apiVersion} in ${NAMESPACE}...`); + const newResource = (await k8sApi.createNamespacedCustomObject(DST_GRP, DST_GRP_VER, NAMESPACE, KIND_PLURAL, bodyResource)).body; + console.log('...done creating.'); + + const bodyStatus = { + apiVersion: newResource.apiVersion, + kind: newResource.kind, + metadata: { + name: newResource.metadata.name, + resourceVersion: newResource.metadata.resourceVersion + }, + status: resource.status + }; + + console.log(`Copying status from ${resource.kind} "${resource.metadata.name}" of ${newResource.apiVersion} to ${newResource.kind} "${newResource.metadata.name}" of ${newResource.apiVersion} in ${NAMESPACE}...`); + const newResourceWithStatus = (await k8sApi.replaceNamespacedCustomObjectStatus(DST_GRP, DST_GRP_VER, NAMESPACE, KIND_PLURAL, bodyStatus.metadata.name, bodyStatus)).body; + console.log('...done copying.'); + return newResourceWithStatus; + } + catch (e) { + console.error(e.body ? e.body.message ? e.body.message : e.body : e); + } + })); +}; + +const argv = require('minimist')(process.argv.slice(2)); + +let NAMESPACE = argv.namespace ? argv.namespace.length > 0 ? 
argv.namespace : 'default' : 'default'; +let KIND_PLURAL = { + cluster: 'nificlusters', + dataflow: 'nifidataflows', + parametercontext: 'nifiparametercontexts', + registryclient: 'nifiregistryclients', + user: 'nifiusers', + usergroup: 'nifiusergroups', +}; + +if (!argv.type) { + console.error('Type not provided'); + process.exit(1); +} + +if (!KIND_PLURAL[argv.type]) { + console.error(`Type ${argv.type} is not one of the following types: ${Object.keys(KIND_PLURAL)}`); + process.exit(1); +} + +console.log(`########### START: ${KIND_PLURAL[argv.type]} ###########`); +call( ORANGE_GROUP, ORANGE_GROUP_VERSION, KONPYUTAIKA_GROUP, KONPYUTAIKA_GROUP_VERSION, KIND_PLURAL[argv.type], NAMESPACE) + .then(r => console.log('############ END ############')) + .catch(e => console.error(e)); +``` + +## Run script + +To migrate the resources, run the following command: + +```bash +npm start -- --type= --namespace= +``` + +with +- ``: NiFiKop resource type (cluster, dataflow, user, usergroup, parametercontext or registryclient) +- `:` Kubernetes namespace where the resources will be migrated \ No newline at end of file diff --git a/site/website/versioned_sidebars/version-v0.13.0-sidebars.json b/site/website/versioned_sidebars/version-v0.13.0-sidebars.json new file mode 100644 index 0000000000..7b65e2ea72 --- /dev/null +++ b/site/website/versioned_sidebars/version-v0.13.0-sidebars.json @@ -0,0 +1,227 @@ +{ + "version-v0.13.0/docs": [ + { + "collapsed": true, + "type": "category", + "label": "Concepts", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/1_concepts/1_introduction" + }, + { + "type": "doc", + "id": "version-v0.13.0/1_concepts/2_design_principes" + }, + { + "type": "doc", + "id": "version-v0.13.0/1_concepts/3_features" + }, + { + "type": "doc", + "id": "version-v0.13.0/1_concepts/4_roadmap" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Setup", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/2_setup/1_getting_started" + }, + 
{ + "collapsed": true, + "type": "category", + "label": "Platform Setup", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/2_setup/2_platform_setup/1_gke" + }, + { + "type": "doc", + "id": "version-v0.13.0/2_setup/2_platform_setup/2_k3d" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Install", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/2_setup/3_install/1_customizable_install_with_helm" + } + ] + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Tasks", + "items": [ + { + "collapsed": true, + "type": "category", + "label": "NiFi Cluster", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/1_nifi_cluster/2_cluster_scaling" + }, + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/1_nifi_cluster/4_external_cluster" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Security", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/2_security/1_ssl" + }, + { + "collapsed": true, + "type": "category", + "label": "Authentication", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/2_security/2_authentication/1_oidc" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Authorization", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/2_security/2_authorization/1_authorizer" + } + ] + } + ] + }, + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/3_nifi_dataflow" + }, + { + "type": "doc", + "id": "version-v0.13.0/3_tasks/4_nifi_user_group" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Reference", + "items": [ + { + "collapsed": true, + "type": "category", + "label": "NiFi Cluster", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/1_nifi_cluster" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/2_read_only_config" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/3_node_config" + 
}, + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/4_node" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/5_node_state" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/6_listeners_config" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/1_nifi_cluster/7_external_service_config" + } + ] + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/2_nifi_user" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/3_nifi_registry_client" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/4_nifi_parameter_context" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/5_nifi_dataflow" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/6_nifi_usergroup" + }, + { + "type": "doc", + "id": "version-v0.13.0/5_references/7_nifi_nodegroup_autoscaler" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Contributing", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/6_contributing/0_contribution_organization" + }, + { + "type": "doc", + "id": "version-v0.13.0/6_contributing/1_developer_guide" + }, + { + "type": "doc", + "id": "version-v0.13.0/6_contributing/2_reporting_bugs" + }, + { + "type": "doc", + "id": "version-v0.13.0/6_contributing/3_credits" + } + ] + }, + { + "collapsed": true, + "type": "category", + "label": "Upgrade Guides", + "items": [ + { + "type": "doc", + "id": "version-v0.13.0/7_upgrade/1_v0.7.x_to_v0.8.0" + } + ] + } + ] +} diff --git a/site/website/versions.json b/site/website/versions.json index ad4c601a3f..e7aa4dcbb3 100644 --- a/site/website/versions.json +++ b/site/website/versions.json @@ -1,4 +1,5 @@ [ + "v0.13.0", "v0.12.0", "v0.11.0", "v0.10.0", diff --git a/version/version.go b/version/version.go index c5922ea729..e7345b3fea 100644 --- a/version/version.go +++ b/version/version.go @@ -1,5 +1,5 @@ package version var ( - Version = "0.12.0" + Version = "0.13.0" )