diff --git a/.gitignore b/.gitignore index e3692b554d..b2e8d73c5d 100644 --- a/.gitignore +++ b/.gitignore @@ -74,8 +74,8 @@ Session.vim tags ### VisualStudioCode ### .vscode/* -.history .config/* +.history __debug_bin* # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode diff --git a/CHANGELOG.md b/CHANGELOG.md index 3032c019ab..41859240a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,9 +4,12 @@ - [PR #292](https://github.com/konpyutaika/nifikop/pull/292) - **[Operator/NifiCluster]** Modify RBAC kubebuilder annotations so NiFiKop works on OpenShift - [PR #292](https://github.com/konpyutaika/nifikop/pull/292) - **[Helm Chart]** Add Parameter for RunAsUser for OpenShift +- [PR #291](https://github.com/konpyutaika/nifikop/pull/291) - **[Plugin]** Implementation of NiFiKop's plugin. +- [PR #291](https://github.com/konpyutaika/nifikop/pull/291) - **[Operator/NifiConnection]** Implementation of the NifiConnection controller. +- ### Changed -- [PR #290](https://github.com/konpyutaika/nifikop/pull/290) - **[Operator/NifiCluster]** Change default sensitive algorithm +- [PR #290](https://github.com/konpyutaika/nifikop/pull/290) - **[Operator/NifiCluster]** Change default sensitive algorithm. ### Fixed Bugs diff --git a/Makefile b/Makefile index 31a79b7513..ae516850e1 100644 --- a/Makefile +++ b/Makefile @@ -396,4 +396,8 @@ catalog-build: opm ## Build a catalog image. # Push the catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. 
- $(MAKE) docker-push IMG=$(CATALOG_IMG) \ No newline at end of file + $(MAKE) docker-push IMG=$(CATALOG_IMG) + +.PHONY: kubectl-nifikop +kubectl-nifikop: + go build -o bin/kubectl-nifikop ./cmd/kubectl-nifikop/main.go \ No newline at end of file diff --git a/PROJECT b/PROJECT index 8a7b017cfe..4493ab2c89 100644 --- a/PROJECT +++ b/PROJECT @@ -118,4 +118,13 @@ resources: kind: NifiNodeGroupAutoscaler path: github.com/konpyutaika/nifikop/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: konpyutaika.com + group: nifi + kind: NifiConnection + path: github.com/konpyutaika/nifikop/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/api/v1/common_types.go b/api/v1/common_types.go index 9c98392fb2..fc00a70c60 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -24,8 +24,9 @@ type DataflowState string // DataflowUpdateRequestType defines the type of versioned flow update request type DataflowUpdateRequestType string -// DataflowUpdateStrategy defines the type of strategy to update a flow -type DataflowUpdateStrategy string +// ComponentUpdateStrategy defines the type of strategy to update a component +// +kubebuilder:validation:Enum={"drop","drain"} +type ComponentUpdateStrategy string // RackAwarenessState stores info about rack awareness status type RackAwarenessState string @@ -46,12 +47,15 @@ type ConfigurationState string type InitClusterNode bool // PKIBackend represents an interface implementing the PKIManager +// +kubebuilder:validation:Enum={"cert-manager","vault"} type PKIBackend string // ClientConfigType represents an interface implementing the ClientConfigManager +// +kubebuilder:validation:Enum={"tls","basic"} type ClientConfigType string // ClusterType represents an interface implementing the ClientConfigManager +// +kubebuilder:validation:Enum={"external","internal"} type ClusterType string // AccessPolicyType represents the type of access policy @@ -285,9 +289,9 @@ const ( // 
DrainStrategy leads to shutting down only input components (Input processors, remote input process group) // and dropping all flowfiles from the flow. - DrainStrategy DataflowUpdateStrategy = "drain" + DrainStrategy ComponentUpdateStrategy = "drain" // DropStrategy leads to shutting down all components and dropping all flowfiles from the flow. - DropStrategy DataflowUpdateStrategy = "drop" + DropStrategy ComponentUpdateStrategy = "drop" // UserStateCreated describes the status of a NifiUser as created UserStateCreated UserState = "created" @@ -437,6 +441,7 @@ func SecretRefsEquals(secretRefs []SecretReference) bool { return true } +// +kubebuilder:validation:Enum={"never","always","once"} type DataflowSyncMode string const ( diff --git a/api/v1/nificluster_types.go b/api/v1/nificluster_types.go index 5ea9a63dcd..ce13dbfe84 100644 --- a/api/v1/nificluster_types.go +++ b/api/v1/nificluster_types.go @@ -27,10 +27,8 @@ const ( // NifiClusterSpec defines the desired state of NifiCluster type NifiClusterSpec struct { // clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. - // +kubebuilder:validation:Enum={"tls","basic"} ClientType ClientConfigType `json:"clientType,omitempty"` // type defines if the cluster is internal (i.e manager by the operator) or external. 
- // +kubebuilder:validation:Enum={"external","internal"} Type ClusterType `json:"type,omitempty"` // nodeURITemplate used to dynamically compute node uri (used if external type) NodeURITemplate string `json:"nodeURITemplate,omitempty"` @@ -415,7 +413,6 @@ type SSLSecrets struct { // https://cert-manager.io/docs/concepts/issuer/ IssuerRef *cmmeta.ObjectReference `json:"issuerRef,omitempty"` // TODO : add vault - // +kubebuilder:validation:Enum={"cert-manager","vault"} PKIBackend PKIBackend `json:"pkiBackend,omitempty"` //,"vault" } diff --git a/api/v1/nifidataflow_types.go b/api/v1/nifidataflow_types.go index 7f07c4ba46..3e80f4799d 100644 --- a/api/v1/nifidataflow_types.go +++ b/api/v1/nifidataflow_types.go @@ -22,7 +22,6 @@ type NifiDataflowSpec struct { // contains the reference to the ParameterContext with the one the dataflow is linked. ParameterContextRef *ParameterContextReference `json:"parameterContextRef,omitempty"` // if the flow will be synchronized once, continuously or never - // +kubebuilder:validation:Enum={"never","always","once"} SyncMode *DataflowSyncMode `json:"syncMode,omitempty"` // whether the flow is considered as ran if some controller services are still invalid or not. SkipInvalidControllerService bool `json:"skipInvalidControllerService,omitempty"` @@ -33,8 +32,7 @@ type NifiDataflowSpec struct { // contains the reference to the NifiRegistry with the one the dataflow is linked. 
RegistryClientRef *RegistryClientReference `json:"registryClientRef,omitempty"` // describes the way the operator will deal with data when a dataflow will be updated : drop or drain - // +kubebuilder:validation:Enum={"drop","drain"} - UpdateStrategy DataflowUpdateStrategy `json:"updateStrategy"` + UpdateStrategy ComponentUpdateStrategy `json:"updateStrategy"` } type FlowPosition struct { diff --git a/api/v1alpha1/common_types.go b/api/v1alpha1/common_types.go index 95ecbd1314..41e9c4ae16 100644 --- a/api/v1alpha1/common_types.go +++ b/api/v1alpha1/common_types.go @@ -3,6 +3,7 @@ package v1alpha1 import ( "fmt" + v1 "github.com/konpyutaika/nifikop/api/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -21,11 +22,15 @@ type ClusterScalingStrategy string // DataflowState defines the state of a NifiDataflow type DataflowState string +// ConnectionState defines the state of a NifiConnection +type ConnectionState string + // DataflowUpdateRequestType defines the type of versioned flow update request type DataflowUpdateRequestType string -// DataflowUpdateStrategy defines the type of strategy to update a flow -type DataflowUpdateStrategy string +// ComponentUpdateStrategy defines the type of strategy to update a component +// +kubebuilder:validation:Enum={"drop","drain"} +type ComponentUpdateStrategy string // RackAwarenessState stores info about rack awareness status type RackAwarenessState string @@ -46,12 +51,15 @@ type ConfigurationState string type InitClusterNode bool // PKIBackend represents an interface implementing the PKIManager +// +kubebuilder:validation:Enum={"cert-manager","vault"} type PKIBackend string // ClientConfigType represents an interface implementing the ClientConfigManager +// +kubebuilder:validation:Enum={"tls","basic"} type ClientConfigType string // ClusterType represents an interface implementing the ClientConfigManager +// +kubebuilder:validation:Enum={"external","internal"} type ClusterType string // AccessPolicyType represents the type 
of access policy @@ -278,6 +286,13 @@ const ( // DataflowStateInSync describes the status of a NifiDataflow as in sync DataflowStateInSync DataflowState = "InSync" + // ConnectionStateOutOfSync describes the status of a NifiConnection as out of sync + ConnectionStateOutOfSync ConnectionState = "OutOfSync" + // ConnectionStateInSync describes the status of a NifiConnection as in sync + ConnectionStateInSync ConnectionState = "InSync" + // ConnectionStateCreated describes the status of a NifiConnection as created + ConnectionStateCreated ConnectionState = "Created" + // RevertRequestType defines a revert changes request. RevertRequestType DataflowUpdateRequestType = "Revert" // UpdateRequestType defines an update version request. @@ -285,9 +300,9 @@ const ( // DrainStrategy leads to shutting down only input components (Input processors, remote input process group) // and dropping all flowfiles from the flow. - DrainStrategy DataflowUpdateStrategy = "drain" + DrainStrategy ComponentUpdateStrategy = "drain" // DropStrategy leads to shutting down all components and dropping all flowfiles from the flow. 
- DropStrategy DataflowUpdateStrategy = "drop" + DropStrategy ComponentUpdateStrategy = "drop" // UserStateCreated describes the status of a NifiUser as created UserStateCreated UserState = "created" @@ -437,6 +452,21 @@ func SecretRefsEquals(secretRefs []SecretReference) bool { return true } +func ComponentRefsEquals(componentRefs []ComponentReference) bool { + c1 := componentRefs[0] + name := c1.Name + ns := c1.Namespace + + for _, component := range componentRefs { + if name != component.Name || ns != component.Namespace || c1.Type != component.Type || c1.SubName != component.SubName { + return false + } + } + + return true +} + +// +kubebuilder:validation:Enum={"never","always","once"} type DataflowSyncMode string const ( @@ -462,3 +492,64 @@ const ( // downscale strategy targeting nodes which are least busy in terms of # flowfiles in queues LeastBusyClusterDownscaleStrategy ClusterScalingStrategy = "leastbusy" ) + +// Change the list to {"dataflow","input-port","output-port","processor","process-group"} when all the types are available +// +kubebuilder:validation:Enum={"dataflow"} +type ComponentType string + +const ( + ComponentDataflow ComponentType = "dataflow" + ComponentInputPort ComponentType = "input-port" + ComponentOutputPort ComponentType = "output-port" + ComponentProcessor ComponentType = "processor" + ComponentFunnel ComponentType = "funnel" + ComponentProcessGroup ComponentType = "process-group" +) + +type ComponentInformation struct { + Id string `json:"id"` + GroupId string `json:"groupId"` + Type string `json:"type"` + ParentGroupId string `json:"parentGroupId"` + ClusterRef v1.ClusterReference `json:"clusterRef"` +} + +// +kubebuilder:validation:Enum={"DO_NOT_LOAD_BALANCE","PARTITION_BY_ATTRIBUTE","ROUND_ROBIN","SINGLE"} +type ConnectionLoadBalanceStrategy string + +const ( + // Do not load balance FlowFiles between nodes in the cluster. 
+ StrategyDoNotLoadBalance ConnectionLoadBalanceStrategy = "DO_NOT_LOAD_BALANCE" + // Determine which node to send a given FlowFile to based on the value of a user-specified FlowFile Attribute. All FlowFiles that have the same value for said Attribute will be sent to the same node in the cluster. + StrategyPartitionByAttribute ConnectionLoadBalanceStrategy = "PARTITION_BY_ATTRIBUTE" + // FlowFiles will be distributed to nodes in the cluster in a Round-Robin fashion. However, if a node in the cluster is not able to receive data as fast as other nodes, that node may be skipped in one or more iterations in order to maximize throughput of data distribution across the cluster. + StrategyRoundRobin ConnectionLoadBalanceStrategy = "ROUND_ROBIN" + // All FlowFiles will be sent to the same node. Which node they are sent to is not defined. + StrategySingle ConnectionLoadBalanceStrategy = "SINGLE" +) + +// +kubebuilder:validation:Enum={"DO_NOT_COMPRESS","COMPRESS_ATTRIBUTES_ONLY","COMPRESS_ATTRIBUTES_AND_CONTENT"} +type ConnectionLoadBalanceCompression string + +const ( + // FlowFiles will not be compressed. + CompressionDoNotCompress ConnectionLoadBalanceCompression = "DO_NOT_COMPRESS" + // FlowFiles' attributes will be compressed, but the FlowFiles' contents will not be + CompressionCompressAttributesOnly ConnectionLoadBalanceCompression = "COMPRESS_ATTRIBUTES_ONLY" + // FlowFiles' attributes and content will be compressed + CompressionCompressAttributesAndContent ConnectionLoadBalanceCompression = "COMPRESS_ATTRIBUTES_AND_CONTENT" +) + +// +kubebuilder:validation:Enum={"FirstInFirstOutPrioritizer","NewestFlowFileFirstPrioritizer","OldestFlowFileFirstPrioritizer","PriorityAttributePrioritizer"} +type ConnectionPrioritizer string + +const ( + // Given two FlowFiles, the one that reached the connection first will be processed first. 
+ PrioritizerFirstInFirstOutPrioritizer ConnectionPrioritizer = "FirstInFirstOutPrioritizer" + // Given two FlowFiles, the one that is newest in the dataflow will be processed first. + PrioritizerNewestFlowFileFirstPrioritizer ConnectionPrioritizer = "NewestFlowFileFirstPrioritizer" + // Given two FlowFiles, the one that is oldest in the dataflow will be processed first. 'This is the default scheme that is used if no prioritizers are selected'. + PrioritizerOldestFlowFileFirstPrioritizer ConnectionPrioritizer = "OldestFlowFileFirstPrioritizer" + // Given two FlowFiles, an attribute called “priority” will be extracted. The one that has the lowest priority value will be processed first. + PrioritizerPriorityAttributePrioritizer ConnectionPrioritizer = "PriorityAttributePrioritizer" +) diff --git a/api/v1alpha1/nificluster_types.go b/api/v1alpha1/nificluster_types.go index de2f121afe..4cb5dfacc4 100644 --- a/api/v1alpha1/nificluster_types.go +++ b/api/v1alpha1/nificluster_types.go @@ -27,10 +27,8 @@ const ( // NifiClusterSpec defines the desired state of NifiCluster type NifiClusterSpec struct { // clientType defines if the operator will use basic or tls authentication to query the NiFi cluster. - // +kubebuilder:validation:Enum={"tls","basic"} ClientType ClientConfigType `json:"clientType,omitempty"` // type defines if the cluster is internal (i.e manager by the operator) or external. 
- // +kubebuilder:validation:Enum={"external","internal"} Type ClusterType `json:"type,omitempty"` // nodeURITemplate used to dynamically compute node uri (used if external type) NodeURITemplate string `json:"nodeURITemplate,omitempty"` @@ -373,7 +371,6 @@ type SSLSecrets struct { // https://cert-manager.io/docs/concepts/issuer/ IssuerRef *cmmeta.ObjectReference `json:"issuerRef,omitempty"` // TODO : add vault - // +kubebuilder:validation:Enum={"cert-manager","vault"} PKIBackend PKIBackend `json:"pkiBackend,omitempty"` //,"vault" } diff --git a/api/v1alpha1/nificonnection_types.go b/api/v1alpha1/nificonnection_types.go new file mode 100644 index 0000000000..ffe6035dfa --- /dev/null +++ b/api/v1alpha1/nificonnection_types.go @@ -0,0 +1,144 @@ +package v1alpha1 + +import ( + v1 "github.com/konpyutaika/nifikop/api/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// NifiConnectionSpec defines the desired state of NifiConnection +type NifiConnectionSpec struct { + // the Source component of the connection. + Source ComponentReference `json:"source"` + // the Destination component of the connection. + Destination ComponentReference `json:"destination"` + // the Configuration of the connection. + Configuration ConnectionConfiguration `json:"configuration,omitempty"` + // describes the way the operator will deal with data when a connection will be updated : drop or drain. + UpdateStrategy v1.ComponentUpdateStrategy `json:"updateStrategy"` +} + +type ComponentReference struct { + // the name of the component. + Name string `json:"name"` + // the namespace of the component. + Namespace string `json:"namespace,omitempty"` + // the type of the component (e.g. nifidataflow). + Type ComponentType `json:"type"` + // the name of the sub component (e.g. queue or port name). 
+ SubName string `json:"subName,omitempty"` +} + +type ConnectionConfiguration struct { + // the maximum amount of time an object may be in the flow before it will be automatically aged out of the flow. + FlowFileExpiration string `json:"flowFileExpiration,omitempty"` + // the maximum data size of objects that can be queued before back pressure is applied. + // +kubebuilder:default="1 GB" + BackPressureDataSizeThreshold string `json:"backPressureDataSizeThreshold,omitempty"` + // the maximum number of objects that can be queued before back pressure is applied. + // +kubebuilder:default=10000 + BackPressureObjectThreshold int64 `json:"backPressureObjectThreshold,omitempty"` + // how to load balance the data in this Connection across the nodes in the cluster. + // +kubebuilder:default="DO_NOT_LOAD_BALANCE" + LoadBalanceStrategy ConnectionLoadBalanceStrategy `json:"loadBalanceStrategy,omitempty"` + // the FlowFile Attribute to use for determining which node a FlowFile will go to. + LoadBalancePartitionAttribute string `json:"loadBalancePartitionAttribute,omitempty"` + // whether or not data should be compressed when being transferred between nodes in the cluster. + // +kubebuilder:default="DO_NOT_COMPRESS" + LoadBalanceCompression ConnectionLoadBalanceCompression `json:"loadBalanceCompression,omitempty"` + // the comparators used to prioritize the queue. + Prioritizers []ConnectionPrioritizer `json:"prioritizers,omitempty"` + // the index of the bend point where to place the connection label. + LabelIndex *int32 `json:"labelIndex,omitempty"` + // the bend points on the connection. + Bends []ConnectionBend `json:"bends,omitempty"` +} + +type ConnectionBend struct { + // The x coordinate. + X *int64 `json:"posX,omitempty"` + // The y coordinate. + Y *int64 `json:"posY,omitempty"` +} + +// NifiConnectionStatus defines the observed state of NifiConnection +type NifiConnectionStatus struct { + // connection ID. 
+ ConnectionId string `json:"connectionID"` + // connection current state. + State ConnectionState `json:"state"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// NifiConnection is the Schema for the nificonnections API +type NifiConnection struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NifiConnectionSpec `json:"spec,omitempty"` + Status NifiConnectionStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// NifiConnectionList contains a list of NifiConnection +type NifiConnectionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NifiConnection `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NifiConnection{}, &NifiConnectionList{}) +} + +func (nCon *NifiConnectionSpec) IsValid() bool { + return nCon.Source.IsValid() && nCon.Destination.IsValid() && nCon.Configuration.IsValid() +} + +func (ref *ComponentReference) IsValid() bool { + return ref.Type == ComponentDataflow && ref.SubName != "" +} + +func (conf *ConnectionConfiguration) IsValid() bool { + if conf.LoadBalanceStrategy == StrategyPartitionByAttribute && len(conf.GetLoadBalancePartitionAttribute()) == 0 { + return false + } + return true +} + +func (conf *ConnectionConfiguration) GetFlowFileExpiration() string { + return conf.FlowFileExpiration +} + +func (conf *ConnectionConfiguration) GetLoadBalancePartitionAttribute() string { + return conf.LoadBalancePartitionAttribute +} + +func (conf *ConnectionConfiguration) GetPrioritizers() []ConnectionPrioritizer { + return conf.Prioritizers +} + +func (conf *ConnectionConfiguration) GetStringPrioritizers() []string { + var prefix string = "org.apache.nifi.prioritizer." 
+ prioritizers := []string{} + for _, prioritizer := range conf.Prioritizers { + prioritizers = append(prioritizers, prefix+string(prioritizer)) + } + return prioritizers +} + +func (conf *ConnectionConfiguration) GetLabelIndex() int32 { + if conf.LabelIndex != nil { + return *conf.LabelIndex + } + return 0 +} + +func (conf *ConnectionConfiguration) GetBends() []ConnectionBend { + return conf.Bends +} diff --git a/api/v1alpha1/nifidataflow_conversion.go b/api/v1alpha1/nifidataflow_conversion.go index 2c3b4e6364..7afa9cf368 100644 --- a/api/v1alpha1/nifidataflow_conversion.go +++ b/api/v1alpha1/nifidataflow_conversion.go @@ -2,6 +2,7 @@ package v1alpha1 import ( "fmt" + v1 "github.com/konpyutaika/nifikop/api/v1" "sigs.k8s.io/controller-runtime/pkg/conversion" ) @@ -66,7 +67,7 @@ func convertNifiDataflowSpec(src *NifiDataflowSpec, dst *v1.NifiDataflow) error dst.Spec.SkipInvalidComponent = src.SkipInvalidComponent convertNifiDataflowClusterRef(src.ClusterRef, dst) convertNifiDataflowRegistryClientRef(src.RegistryClientRef, dst) - dst.Spec.UpdateStrategy = v1.DataflowUpdateStrategy(src.UpdateStrategy) + dst.Spec.UpdateStrategy = v1.ComponentUpdateStrategy(src.UpdateStrategy) return nil } @@ -198,7 +199,7 @@ func convertFromNifiDataflowSpec(src *v1.NifiDataflowSpec, dst *NifiDataflow) er dst.Spec.SkipInvalidComponent = src.SkipInvalidComponent convertFromNifiDataflowClusterRef(src.ClusterRef, dst) convertFromNifiDataflowRegistryClientRef(src.RegistryClientRef, dst) - dst.Spec.UpdateStrategy = DataflowUpdateStrategy(src.UpdateStrategy) + dst.Spec.UpdateStrategy = ComponentUpdateStrategy(src.UpdateStrategy) return nil } diff --git a/api/v1alpha1/nifidataflow_types.go b/api/v1alpha1/nifidataflow_types.go index 68d1277c74..149c45baaa 100644 --- a/api/v1alpha1/nifidataflow_types.go +++ b/api/v1alpha1/nifidataflow_types.go @@ -19,7 +19,6 @@ type NifiDataflowSpec struct { // contains the reference to the ParameterContext with the one the dataflow is linked. 
ParameterContextRef *ParameterContextReference `json:"parameterContextRef,omitempty"` // if the flow will be synchronized once, continuously or never - // +kubebuilder:validation:Enum={"never","always","once"} SyncMode *DataflowSyncMode `json:"syncMode,omitempty"` // whether the flow is considered as ran if some controller services are still invalid or not. SkipInvalidControllerService bool `json:"skipInvalidControllerService,omitempty"` @@ -30,8 +29,7 @@ type NifiDataflowSpec struct { // contains the reference to the NifiRegistry with the one the dataflow is linked. RegistryClientRef *RegistryClientReference `json:"registryClientRef,omitempty"` // describes the way the operator will deal with data when a dataflow will be updated : drop or drain - // +kubebuilder:validation:Enum={"drop","drain"} - UpdateStrategy DataflowUpdateStrategy `json:"updateStrategy"` + UpdateStrategy ComponentUpdateStrategy `json:"updateStrategy"` } type FlowPosition struct { diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index c58239a169..a7ee65b0c6 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -134,6 +134,37 @@ func (in *ClusterReference) DeepCopy() *ClusterReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentInformation) DeepCopyInto(out *ComponentInformation) { + *out = *in + out.ClusterRef = in.ClusterRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentInformation. +func (in *ComponentInformation) DeepCopy() *ComponentInformation { + if in == nil { + return nil + } + out := new(ComponentInformation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentReference) DeepCopyInto(out *ComponentReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentReference. +func (in *ComponentReference) DeepCopy() *ComponentReference { + if in == nil { + return nil + } + out := new(ComponentReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigmapReference) DeepCopyInto(out *ConfigmapReference) { *out = *in @@ -149,6 +180,63 @@ func (in *ConfigmapReference) DeepCopy() *ConfigmapReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionBend) DeepCopyInto(out *ConnectionBend) { + *out = *in + if in.X != nil { + in, out := &in.X, &out.X + *out = new(int64) + **out = **in + } + if in.Y != nil { + in, out := &in.Y, &out.Y + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionBend. +func (in *ConnectionBend) DeepCopy() *ConnectionBend { + if in == nil { + return nil + } + out := new(ConnectionBend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionConfiguration) DeepCopyInto(out *ConnectionConfiguration) { + *out = *in + if in.Prioritizers != nil { + in, out := &in.Prioritizers, &out.Prioritizers + *out = make([]ConnectionPrioritizer, len(*in)) + copy(*out, *in) + } + if in.LabelIndex != nil { + in, out := &in.LabelIndex, &out.LabelIndex + *out = new(int32) + **out = **in + } + if in.Bends != nil { + in, out := &in.Bends, &out.Bends + *out = make([]ConnectionBend, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfiguration. +func (in *ConnectionConfiguration) DeepCopy() *ConnectionConfiguration { + if in == nil { + return nil + } + out := new(ConnectionConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DisruptionBudget) DeepCopyInto(out *DisruptionBudget) { *out = *in @@ -582,6 +670,98 @@ func (in *NifiClusterTaskSpec) DeepCopy() *NifiClusterTaskSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiConnection) DeepCopyInto(out *NifiConnection) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiConnection. +func (in *NifiConnection) DeepCopy() *NifiConnection { + if in == nil { + return nil + } + out := new(NifiConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NifiConnection) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiConnectionList) DeepCopyInto(out *NifiConnectionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NifiConnection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiConnectionList. +func (in *NifiConnectionList) DeepCopy() *NifiConnectionList { + if in == nil { + return nil + } + out := new(NifiConnectionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NifiConnectionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NifiConnectionSpec) DeepCopyInto(out *NifiConnectionSpec) { + *out = *in + out.Source = in.Source + out.Destination = in.Destination + in.Configuration.DeepCopyInto(&out.Configuration) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiConnectionSpec. +func (in *NifiConnectionSpec) DeepCopy() *NifiConnectionSpec { + if in == nil { + return nil + } + out := new(NifiConnectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NifiConnectionStatus) DeepCopyInto(out *NifiConnectionStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NifiConnectionStatus. +func (in *NifiConnectionStatus) DeepCopy() *NifiConnectionStatus { + if in == nil { + return nil + } + out := new(NifiConnectionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NifiDataflow) DeepCopyInto(out *NifiDataflow) { *out = *in diff --git a/cmd/kubectl-nifikop/main.go b/cmd/kubectl-nifikop/main.go new file mode 100644 index 0000000000..bd6810ade5 --- /dev/null +++ b/cmd/kubectl-nifikop/main.go @@ -0,0 +1,36 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "os" + + "github.com/spf13/pflag" + + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifikop" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +func main() { + flags := pflag.NewFlagSet("kubectl-nifikop", pflag.ExitOnError) + pflag.CommandLine = flags + + root := nifikop.NewCmd(genericclioptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}) + if err := root.Execute(); err != nil { + os.Exit(1) + } +} diff --git a/cmd/kubectl-nifikop/nificluster/get/get.go b/cmd/kubectl-nifikop/nificluster/get/get.go new file mode 100644 index 0000000000..8b1e7cb29a --- /dev/null +++ b/cmd/kubectl-nifikop/nificluster/get/get.go @@ -0,0 +1,131 @@ +package get + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiCluster in the current namespace + %[1]s get + + # view NifiCluster foo + %[1]s get foo +` + +// options provides information required by the get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of options with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. 
+func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiCluster name]", + Short: "Get NifiCluster", + Example: fmt.Sprintf(getExample, "kubectl nifikop nificluster"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command. +func (o *options) run() error { + list := &v1alpha1.NifiClusterList{} + + if o.name == "" { + if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil { + return fmt.Errorf("unable to list NifiCluster: %w", err) + } + } else { + item := &v1alpha1.NifiCluster{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiCluster %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiCluster: %w", err) + } + list.Items = append(list.Items, *item) + } + + table := newTable(o.Out) + for _, item := range list.Items { + data := []string{item.Namespace, item.Name} + + data = append(data, string(item.Status.State)) + + table.Append(data) + } + + // Send output. 
+ table.Render() + + return nil +} + +func newTable(out io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(out) + table.SetHeader([]string{"Namespace", "Name", "State"}) + table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false}) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(false) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderLine(false) + return table +} diff --git a/cmd/kubectl-nifikop/nificluster/nificluster.go b/cmd/kubectl-nifikop/nificluster/nificluster.go new file mode 100644 index 0000000000..8442bf1160 --- /dev/null +++ b/cmd/kubectl-nifikop/nificluster/nificluster.go @@ -0,0 +1,36 @@ +package nificluster + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nificluster/get" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nificluster [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nificonnection/get/get.go b/cmd/kubectl-nifikop/nificonnection/get/get.go new file mode 100644 index 0000000000..e7a09beb62 --- /dev/null +++ b/cmd/kubectl-nifikop/nificonnection/get/get.go @@ -0,0 +1,132 @@ +package get + +import ( + "context" + "errors" + 
"fmt" + "io" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiConnection in the current namespace + %[1]s get + + # view NifiConnection foo + %[1]s get foo +` + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. +func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiConnection name]", + Short: "Get NifiConnection", + Example: fmt.Sprintf(getExample, "kubectl nifikop nificonnection"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command. 
+func (o *options) run() error {
+	list := &v1alpha1.NifiConnectionList{}
+
+	if o.name == "" {
+		if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil {
+			return fmt.Errorf("unable to list NifiConnection: %w", err)
+		}
+	} else {
+		item := &v1alpha1.NifiConnection{}
+		err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item)
+		if err != nil && apierrors.IsNotFound(err) {
+			return fmt.Errorf("NifiConnection %s/%s not found", o.UserNamespace, o.name)
+		} else if err != nil {
+			return fmt.Errorf("unable to get NifiConnection: %w", err)
+		}
+		list.Items = append(list.Items, *item)
+	}
+
+	table := newTable(o.Out)
+	for _, item := range list.Items {
+		data := []string{item.Namespace, item.Name}
+
+		data = append(data, string(item.Status.State))
+		data = append(data, item.Status.ConnectionId)
+
+		table.Append(data)
+	}
+
+	// Send output.
+	table.Render()
+
+	return nil
+}
+
+func newTable(out io.Writer) *tablewriter.Table {
+	table := tablewriter.NewWriter(out)
+	table.SetHeader([]string{"Namespace", "Name", "State", "ConnectionId"})
+	table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false})
+	table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
+	table.SetRowLine(false)
+	table.SetCenterSeparator("")
+	table.SetColumnSeparator("")
+	table.SetRowSeparator("")
+	table.SetAlignment(tablewriter.ALIGN_LEFT)
+	table.SetHeaderLine(false)
+	return table
+}
diff --git a/cmd/kubectl-nifikop/nificonnection/nificonnection.go b/cmd/kubectl-nifikop/nificonnection/nificonnection.go
new file mode 100644
index 0000000000..f3980b7d2c
--- /dev/null
+++ b/cmd/kubectl-nifikop/nificonnection/nificonnection.go
@@ -0,0 +1,36 @@
+package nificonnection
+
+import (
+	"github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nificonnection/get"
+
+	"github.com/spf13/cobra"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+)
+
+// options provides information required by nificonnection
command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nificonnection [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifidataflow/get/get.go b/cmd/kubectl-nifikop/nifidataflow/get/get.go new file mode 100644 index 0000000000..3c81b28f5f --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/get/get.go @@ -0,0 +1,132 @@ +package get + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiDataflow in the current namespace + %[1]s get + + # view NifiDataflow foo + %[1]s get foo +` + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. 
+func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiDataflow name]", + Short: "Get NifiDataflow", + Example: fmt.Sprintf(getExample, "kubectl nifikop nifidataflow"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command. +func (o *options) run() error { + list := &v1alpha1.NifiDataflowList{} + + if o.name == "" { + if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil { + return fmt.Errorf("unable to list NifiDataflow: %w", err) + } + } else { + item := &v1alpha1.NifiDataflow{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiDataflow %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiDataflow: %w", err) + } + list.Items = append(list.Items, *item) + } + + table := newTable(o.Out) + for _, item := range list.Items { + data := []string{item.Namespace, item.Name} + + data = append(data, string(item.Status.State)) + data = append(data, item.Status.ProcessGroupID) + + table.Append(data) + } + + // Send output. 
+ table.Render() + + return nil +} + +func newTable(out io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(out) + table.SetHeader([]string{"Namespace", "Name", "State", "ProcessGroupID"}) + table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false}) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(false) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderLine(false) + return table +} diff --git a/cmd/kubectl-nifikop/nifidataflow/input/input.go b/cmd/kubectl-nifikop/nifidataflow/input/input.go new file mode 100644 index 0000000000..63ae3e3d28 --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/input/input.go @@ -0,0 +1,36 @@ +package input + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/input/stop" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "input [subcommand] [flags]", + } + + cmd.AddCommand(stop.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifidataflow/input/stop/stop.go b/cmd/kubectl-nifikop/nifidataflow/input/stop/stop.go new file mode 100644 index 0000000000..bbb616e721 --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/input/stop/stop.go @@ -0,0 +1,124 @@ +package stop 
+ +import ( + "context" + "errors" + "fmt" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + nifiutil "github.com/konpyutaika/nifikop/pkg/util/nifi" + + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + del bool + stopExample = ` + # set label %[2]s to force the stop of the input port bar of NifiDataflow foo + %[1]s stop foo bar + + # remove label %[2]s that forces the stop of the input port bar of NifiDataflow foo + %[1]s stop foo bar +` +) + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string + subName string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. +func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "stop [NifiDataflow name] [Input port name]", + Short: fmt.Sprintf("Set label %s to force the stop of an input port on NifiDataflow", nifiutil.StopInputPortLabel), + Example: fmt.Sprintf(stopExample, "kubectl nifikop nifidataflow input", nifiutil.StopInputPortLabel), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run(c) + }, + } + + cmd.Flags().BoolVarP(&del, "delete", "d", false, fmt.Sprintf("Delete label %s on NifiDataflow", nifiutil.StopInputPortLabel)) + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. 
+func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + if len(args) > 1 { + o.subName = args[1] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) != 2 && !del { + return errors.New("two arguments must be provided if 'delete' flag is missing") + } + if len(o.args) > 2 { + return errors.New("two arguments must be provided") + } + return nil +} + +// run runs the stop command. +func (o *options) run(cmd *cobra.Command) error { + item := &v1alpha1.NifiDataflow{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiDataflow %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiDataflow: %w", err) + } + + itemOriginal := item.DeepCopy() + labels := item.GetLabels() + + if !del { + labels[nifiutil.StopInputPortLabel] = o.subName + } else { + delete(labels, nifiutil.StopInputPortLabel) + } + + item.SetLabels(labels) + err = o.Client.Patch(context.TODO(), item, client.MergeFrom(itemOriginal)) + + if err != nil { + cmd.Println(fmt.Sprintf("Couldn't patch %s/%s: %v", item.GetNamespace(), item.GetName(), err)) + } else { + cmd.Println(fmt.Sprintf("NifiDataflow labels patched successfully in %s/%s", item.GetNamespace(), item.GetName())) + } + + return nil +} diff --git a/cmd/kubectl-nifikop/nifidataflow/nifidataflow.go b/cmd/kubectl-nifikop/nifidataflow/nifidataflow.go new file mode 100644 index 0000000000..348a4d3fc9 --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/nifidataflow.go @@ -0,0 +1,44 @@ +package nifidataflow + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/get" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/input" + 
"github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/output" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/start" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/stop" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nifidataflow [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + cmd.AddCommand(stop.New(streams)) + cmd.AddCommand(start.New(streams)) + cmd.AddCommand(input.New(streams)) + cmd.AddCommand(output.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifidataflow/output/output.go b/cmd/kubectl-nifikop/nifidataflow/output/output.go new file mode 100644 index 0000000000..007d64b56d --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/output/output.go @@ -0,0 +1,36 @@ +package output + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow/output/stop" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: 
genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "output [subcommand] [flags]", + } + + cmd.AddCommand(stop.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifidataflow/output/stop/stop.go b/cmd/kubectl-nifikop/nifidataflow/output/stop/stop.go new file mode 100644 index 0000000000..0b3a32fcef --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/output/stop/stop.go @@ -0,0 +1,124 @@ +package stop + +import ( + "context" + "errors" + "fmt" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + nifiutil "github.com/konpyutaika/nifikop/pkg/util/nifi" + + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + del bool + stopExample = ` + # set label %[2]s to force the stop of the output port bar of NifiDataflow foo + %[1]s stop foo bar + + # remove label %[2]s that forces the stop of the output port bar of NifiDataflow foo + %[1]s stop foo bar +` +) + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string + subName string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. 
+func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "stop [NifiDataflow name] [Output port name]", + Short: fmt.Sprintf("Set label %s to force the stop of an output port on NifiDataflow", nifiutil.StopOutputPortLabel), + Example: fmt.Sprintf(stopExample, "kubectl nifikop nifidataflow output", nifiutil.StopOutputPortLabel), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run(c) + }, + } + + cmd.Flags().BoolVarP(&del, "delete", "d", false, fmt.Sprintf("Delete label %s on NifiDataflow", nifiutil.StopOutputPortLabel)) + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + if len(args) > 1 { + o.subName = args[1] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) != 2 && !del { + return errors.New("two arguments must be provided if 'delete' flag is missing") + } + if len(o.args) > 2 { + return errors.New("two arguments must be provided") + } + return nil +} + +// run runs the stop command. 
+func (o *options) run(cmd *cobra.Command) error { + item := &v1alpha1.NifiDataflow{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiDataflow %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiDataflow: %w", err) + } + + itemOriginal := item.DeepCopy() + labels := item.GetLabels() + + if !del { + labels[nifiutil.StopOutputPortLabel] = o.subName + } else { + delete(labels, nifiutil.StopOutputPortLabel) + } + + item.SetLabels(labels) + err = o.Client.Patch(context.TODO(), item, client.MergeFrom(itemOriginal)) + + if err != nil { + cmd.Println(fmt.Sprintf("Couldn't patch %s/%s: %v", item.GetNamespace(), item.GetName(), err)) + } else { + cmd.Println(fmt.Sprintf("NifiDataflow labels patched successfully in %s/%s", item.GetNamespace(), item.GetName())) + } + + return nil +} diff --git a/cmd/kubectl-nifikop/nifidataflow/start/start.go b/cmd/kubectl-nifikop/nifidataflow/start/start.go new file mode 100644 index 0000000000..df1d96010d --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/start/start.go @@ -0,0 +1,117 @@ +package start + +import ( + "context" + "errors" + "fmt" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + nifiutil "github.com/konpyutaika/nifikop/pkg/util/nifi" + + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + del bool + startExample = ` + # set label %[2]s to force the start of NifiDataflow foo + %[1]s start foo + + # remove label %[2]s that forces the start of NifiDataflow foo + %[1]s start foo +` +) + +// options provides information required by Datadog get command. 
+type options struct {
+	genericclioptions.IOStreams
+	common.Options
+	args []string
+	name string
+}
+
+// newOptions provides an instance of options with default values.
+func newOptions(streams genericclioptions.IOStreams) *options {
+	o := &options{
+		IOStreams: streams,
+	}
+	o.SetConfigFlags()
+	return o
+}
+
+// New provides a cobra command wrapping options for "start" sub command.
+func New(streams genericclioptions.IOStreams) *cobra.Command {
+	o := newOptions(streams)
+	cmd := &cobra.Command{
+		Use:          "start [NifiDataflow name]",
+		Short:        fmt.Sprintf("Set label %s to true on NifiDataflow", nifiutil.ForceStartLabel),
+		Example:      fmt.Sprintf(startExample, "kubectl nifikop nifidataflow", nifiutil.ForceStartLabel),
+		SilenceUsage: true,
+		RunE: func(c *cobra.Command, args []string) error {
+			if err := o.complete(c, args); err != nil {
+				return err
+			}
+			if err := o.validate(); err != nil {
+				return err
+			}
+			return o.run(c)
+		},
+	}
+
+	cmd.Flags().BoolVarP(&del, "delete", "d", false, fmt.Sprintf("Delete label %s on NifiDataflow", nifiutil.ForceStartLabel))
+	o.ConfigFlags.AddFlags(cmd.Flags())
+
+	return cmd
+}
+
+// complete sets all information required for processing the command.
+func (o *options) complete(cmd *cobra.Command, args []string) error {
+	o.args = args
+	if len(args) > 0 {
+		o.name = args[0]
+	}
+	return o.Init(cmd)
+}
+
+// validate ensures that all required arguments and flag values are provided.
+func (o *options) validate() error {
+	if len(o.args) != 1 {
+		return errors.New("one argument must be provided")
+	}
+	return nil
+}
+
+// run runs the start command.
+func (o *options) run(cmd *cobra.Command) error { + item := &v1alpha1.NifiDataflow{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiDataflow %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiDataflow: %w", err) + } + + itemOriginal := item.DeepCopy() + labels := item.GetLabels() + + if !del { + labels[nifiutil.ForceStartLabel] = "true" + } else { + delete(labels, nifiutil.ForceStartLabel) + } + + item.SetLabels(labels) + err = o.Client.Patch(context.TODO(), item, client.MergeFrom(itemOriginal)) + + if err != nil { + cmd.Println(fmt.Sprintf("Couldn't patch %s/%s: %v", item.GetNamespace(), item.GetName(), err)) + } else { + cmd.Println(fmt.Sprintf("NifiDataflow labels patched successfully in %s/%s", item.GetNamespace(), item.GetName())) + } + + return nil +} diff --git a/cmd/kubectl-nifikop/nifidataflow/stop/stop.go b/cmd/kubectl-nifikop/nifidataflow/stop/stop.go new file mode 100644 index 0000000000..7d3fa68e4f --- /dev/null +++ b/cmd/kubectl-nifikop/nifidataflow/stop/stop.go @@ -0,0 +1,117 @@ +package stop + +import ( + "context" + "errors" + "fmt" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + nifiutil "github.com/konpyutaika/nifikop/pkg/util/nifi" + + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + del bool + stopExample = ` + # set label %[2]s to force the stop of NifiDataflow foo + %[1]s stop foo + + # remove label %[2]s that forces the stop of NifiDataflow foo + %[1]s stop foo +` +) + +// options provides information required by Datadog get command. 
+type options struct {
+	genericclioptions.IOStreams
+	common.Options
+	args []string
+	name string
+}
+
+// newOptions provides an instance of options with default values.
+func newOptions(streams genericclioptions.IOStreams) *options {
+	o := &options{
+		IOStreams: streams,
+	}
+	o.SetConfigFlags()
+	return o
+}
+
+// New provides a cobra command wrapping options for "stop" sub command.
+func New(streams genericclioptions.IOStreams) *cobra.Command {
+	o := newOptions(streams)
+	cmd := &cobra.Command{
+		Use:          "stop [NifiDataflow name]",
+		Short:        fmt.Sprintf("Set label %s to true on NifiDataflow", nifiutil.ForceStopLabel),
+		Example:      fmt.Sprintf(stopExample, "kubectl nifikop nifidataflow", nifiutil.ForceStopLabel),
+		SilenceUsage: true,
+		RunE: func(c *cobra.Command, args []string) error {
+			if err := o.complete(c, args); err != nil {
+				return err
+			}
+			if err := o.validate(); err != nil {
+				return err
+			}
+			return o.run(c)
+		},
+	}
+
+	cmd.Flags().BoolVarP(&del, "delete", "d", false, fmt.Sprintf("Delete label %s on NifiDataflow", nifiutil.ForceStopLabel))
+	o.ConfigFlags.AddFlags(cmd.Flags())
+
+	return cmd
+}
+
+// complete sets all information required for processing the command.
+func (o *options) complete(cmd *cobra.Command, args []string) error {
+	o.args = args
+	if len(args) > 0 {
+		o.name = args[0]
+	}
+	return o.Init(cmd)
+}
+
+// validate ensures that all required arguments and flag values are provided.
+func (o *options) validate() error {
+	if len(o.args) != 1 {
+		return errors.New("one argument must be provided")
+	}
+	return nil
+}
+
+// run runs the stop command.
+func (o *options) run(cmd *cobra.Command) error { + item := &v1alpha1.NifiDataflow{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiDataflow %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiDataflow: %w", err) + } + + itemOriginal := item.DeepCopy() + labels := item.GetLabels() + + if !del { + labels[nifiutil.ForceStopLabel] = "true" + } else { + delete(labels, nifiutil.ForceStopLabel) + } + + item.SetLabels(labels) + err = o.Client.Patch(context.TODO(), item, client.MergeFrom(itemOriginal)) + + if err != nil { + cmd.Println(fmt.Sprintf("Couldn't patch %s/%s: %v", item.GetNamespace(), item.GetName(), err)) + } else { + cmd.Println(fmt.Sprintf("NifiDataflow labels patched successfully in %s/%s", item.GetNamespace(), item.GetName())) + } + + return nil +} diff --git a/cmd/kubectl-nifikop/nifigroupautoscaler/get/get.go b/cmd/kubectl-nifikop/nifigroupautoscaler/get/get.go new file mode 100644 index 0000000000..17a388c89b --- /dev/null +++ b/cmd/kubectl-nifikop/nifigroupautoscaler/get/get.go @@ -0,0 +1,134 @@ +package get + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiNodeGroupAutoscaler in the current namespace + %[1]s get + + # view NifiNodeGroupAutoscaler foo + %[1]s get foo +` + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of getOptions with default values. 
+func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. +func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiNodeGroupAutoscaler name]", + Short: "Get NifiNodeGroupAutoscaler", + Example: fmt.Sprintf(getExample, "kubectl nifikop nifigroupautoscaler"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command. 
+func (o *options) run() error { + list := &v1alpha1.NifiNodeGroupAutoscalerList{} + + if o.name == "" { + if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil { + return fmt.Errorf("unable to list NifiNodeGroupAutoscaler: %w", err) + } + } else { + item := &v1alpha1.NifiNodeGroupAutoscaler{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiNodeGroupAutoscaler %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiNodeGroupAutoscaler: %w", err) + } + list.Items = append(list.Items, *item) + } + + table := newTable(o.Out) + for _, item := range list.Items { + data := []string{item.Namespace, item.Name} + + data = append(data, string(item.Status.State)) + data = append(data, strconv.Itoa(int(item.Status.Replicas))) + data = append(data, string(item.Status.Selector)) + + table.Append(data) + } + + // Send output. 
+ table.Render() + + return nil +} + +func newTable(out io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(out) + table.SetHeader([]string{"Namespace", "Name", "State", "Replicas", "Selector"}) + table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false}) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(false) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderLine(false) + return table +} diff --git a/cmd/kubectl-nifikop/nifigroupautoscaler/nifigroupautoscaler.go b/cmd/kubectl-nifikop/nifigroupautoscaler/nifigroupautoscaler.go new file mode 100644 index 0000000000..dcfd1ac079 --- /dev/null +++ b/cmd/kubectl-nifikop/nifigroupautoscaler/nifigroupautoscaler.go @@ -0,0 +1,36 @@ +package nifigroupautoscaler + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifigroupautoscaler/get" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nifigroupautoscaler [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifikop/nifikop.go b/cmd/kubectl-nifikop/nifikop/nifikop.go new file mode 100644 index 0000000000..b89f840d34 --- /dev/null +++ 
b/cmd/kubectl-nifikop/nifikop/nifikop.go @@ -0,0 +1,49 @@ +package nifikop + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nificluster" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nificonnection" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifidataflow" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifigroupautoscaler" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifiregistryclient" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifiuser" + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifiusergroup" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by datadog command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// NewCmd provides a cobra command wrapping options for "datadog" command +func NewCmd(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nifikop [subcommand] [flags]", + } + + // Operator commands + cmd.AddCommand(nificluster.New(streams)) + cmd.AddCommand(nifidataflow.New(streams)) + cmd.AddCommand(nificonnection.New(streams)) + cmd.AddCommand(nifiuser.New(streams)) + cmd.AddCommand(nifiusergroup.New(streams)) + cmd.AddCommand(nifiregistryclient.New(streams)) + cmd.AddCommand(nifigroupautoscaler.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifiregistryclient/get/get.go b/cmd/kubectl-nifikop/nifiregistryclient/get/get.go new file mode 100644 index 0000000000..91e14a132b --- /dev/null +++ b/cmd/kubectl-nifikop/nifiregistryclient/get/get.go @@ -0,0 +1,133 @@ +package get + +import ( + "context" + "errors" + 
"fmt" + "io" + "strconv" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiRegistryClient in the current namespace + %[1]s get + + # view NifiRegistryClient foo + %[1]s get foo +` + +// options provides information required by the get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. +func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiRegistryClient name]", + Short: "Get NifiRegistryClient", + Example: fmt.Sprintf(getExample, "kubectl nifikop nifiregistryclient"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command.
+func (o *options) run() error { + list := &v1alpha1.NifiRegistryClientList{} + + if o.name == "" { + if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil { + return fmt.Errorf("unable to list NifiRegistryClient: %w", err) + } + } else { + item := &v1alpha1.NifiRegistryClient{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiRegistryClient %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiRegistryClient: %w", err) + } + list.Items = append(list.Items, *item) + } + + table := newTable(o.Out) + for _, item := range list.Items { + data := []string{item.Namespace, item.Name} + + data = append(data, string(item.Status.Id)) + data = append(data, strconv.Itoa(int(item.Status.Version))) + + table.Append(data) + } + + // Send output. + table.Render() + + return nil +} + +func newTable(out io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(out) + table.SetHeader([]string{"Namespace", "Name", "Id", "Version"}) + table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false}) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(false) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderLine(false) + return table +} diff --git a/cmd/kubectl-nifikop/nifiregistryclient/nifiregistryclient.go b/cmd/kubectl-nifikop/nifiregistryclient/nifiregistryclient.go new file mode 100644 index 0000000000..cb2c5feaf7 --- /dev/null +++ b/cmd/kubectl-nifikop/nifiregistryclient/nifiregistryclient.go @@ -0,0 +1,36 @@ +package nifiregistryclient + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifiregistryclient/get" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + 
+// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nifiregistryclient [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifiuser/get/get.go b/cmd/kubectl-nifikop/nifiuser/get/get.go new file mode 100644 index 0000000000..2cddfb48ed --- /dev/null +++ b/cmd/kubectl-nifikop/nifiuser/get/get.go @@ -0,0 +1,133 @@ +package get + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiUser in the current namespace + %[1]s get + + # view NifiUser foo + %[1]s get foo +` + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. 
+func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiUser name]", + Short: "Get NifiUser", + Example: fmt.Sprintf(getExample, "kubectl nifikop nifiuser"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command. +func (o *options) run() error { + list := &v1alpha1.NifiUserList{} + + if o.name == "" { + if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil { + return fmt.Errorf("unable to list NifiUser: %w", err) + } + } else { + item := &v1alpha1.NifiUser{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiUser %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiUser: %w", err) + } + list.Items = append(list.Items, *item) + } + + table := newTable(o.Out) + for _, item := range list.Items { + data := []string{item.Namespace, item.Name} + + data = append(data, string(item.Status.Id)) + data = append(data, strconv.Itoa(int(item.Status.Version))) + + table.Append(data) + } + + // Send output. 
+ table.Render() + + return nil +} + +func newTable(out io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(out) + table.SetHeader([]string{"Namespace", "Name", "Id", "Version"}) + table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false}) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(false) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderLine(false) + return table +} diff --git a/cmd/kubectl-nifikop/nifiuser/nifiuser.go b/cmd/kubectl-nifikop/nifiuser/nifiuser.go new file mode 100644 index 0000000000..e94168edc7 --- /dev/null +++ b/cmd/kubectl-nifikop/nifiuser/nifiuser.go @@ -0,0 +1,36 @@ +package nifiuser + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifiuser/get" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nifiuser [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/cmd/kubectl-nifikop/nifiusergroup/get/get.go b/cmd/kubectl-nifikop/nifiusergroup/get/get.go new file mode 100644 index 0000000000..7a58efee77 --- /dev/null +++ b/cmd/kubectl-nifikop/nifiusergroup/get/get.go @@ -0,0 +1,133 @@ +package get + +import ( + "context" + "errors" + "fmt" + "io" + 
"strconv" + + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/plugin/common" + + "github.com/olekukonko/tablewriter" + "github.com/spf13/cobra" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var getExample = ` + # view all NifiUserGroup in the current namespace + %[1]s get + + # view NifiUserGroup foo + %[1]s get foo +` + +// options provides information required by Datadog get command. +type options struct { + genericclioptions.IOStreams + common.Options + args []string + name string +} + +// newOptions provides an instance of getOptions with default values. +func newOptions(streams genericclioptions.IOStreams) *options { + o := &options{ + IOStreams: streams, + } + o.SetConfigFlags() + return o +} + +// New provides a cobra command wrapping options for "get" sub command. +func New(streams genericclioptions.IOStreams) *cobra.Command { + o := newOptions(streams) + cmd := &cobra.Command{ + Use: "get [NifiUserGroup name]", + Short: "Get NifiUserGroup", + Example: fmt.Sprintf(getExample, "kubectl nifikop nifiusergroup"), + SilenceUsage: true, + RunE: func(c *cobra.Command, args []string) error { + if err := o.complete(c, args); err != nil { + return err + } + if err := o.validate(); err != nil { + return err + } + return o.run() + }, + } + + o.ConfigFlags.AddFlags(cmd.Flags()) + + return cmd +} + +// complete sets all information required for processing the command. +func (o *options) complete(cmd *cobra.Command, args []string) error { + o.args = args + if len(args) > 0 { + o.name = args[0] + } + return o.Init(cmd) +} + +// validate ensures that all required arguments and flag values are provided. +func (o *options) validate() error { + if len(o.args) > 1 { + return errors.New("either one or no arguments are allowed") + } + return nil +} + +// run runs the get command. 
+func (o *options) run() error { + list := &v1alpha1.NifiUserGroupList{} + + if o.name == "" { + if err := o.Client.List(context.TODO(), list, &client.ListOptions{Namespace: o.UserNamespace}); err != nil { + return fmt.Errorf("unable to list NifiUserGroup: %w", err) + } + } else { + item := &v1alpha1.NifiUserGroup{} + err := o.Client.Get(context.TODO(), client.ObjectKey{Namespace: o.UserNamespace, Name: o.name}, item) + if err != nil && apierrors.IsNotFound(err) { + return fmt.Errorf("NifiUserGroup %s/%s not found", o.UserNamespace, o.name) + } else if err != nil { + return fmt.Errorf("unable to get NifiUserGroup: %w", err) + } + list.Items = append(list.Items, *item) + } + + table := newTable(o.Out) + for _, item := range list.Items { + data := []string{item.Namespace, item.Name} + + data = append(data, string(item.Status.Id)) + data = append(data, strconv.Itoa(int(item.Status.Version))) + + table.Append(data) + } + + // Send output. + table.Render() + + return nil +} + +func newTable(out io.Writer) *tablewriter.Table { + table := tablewriter.NewWriter(out) + table.SetHeader([]string{"Namespace", "Name", "Id", "Version"}) + table.SetBorders(tablewriter.Border{Left: false, Top: false, Right: false, Bottom: false}) + table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(false) + table.SetCenterSeparator("") + table.SetColumnSeparator("") + table.SetRowSeparator("") + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetHeaderLine(false) + return table +} diff --git a/cmd/kubectl-nifikop/nifiusergroup/nifiusergroup.go b/cmd/kubectl-nifikop/nifiusergroup/nifiusergroup.go new file mode 100644 index 0000000000..009d2273ca --- /dev/null +++ b/cmd/kubectl-nifikop/nifiusergroup/nifiusergroup.go @@ -0,0 +1,36 @@ +package nifiusergroup + +import ( + "github.com/konpyutaika/nifikop/cmd/kubectl-nifikop/nifiusergroup/get" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// options provides information required by clusteragent 
command +type options struct { + genericclioptions.IOStreams + configFlags *genericclioptions.ConfigFlags +} + +// newOptions provides an instance of options with default values +func newOptions(streams genericclioptions.IOStreams) *options { + return &options{ + configFlags: genericclioptions.NewConfigFlags(false), + IOStreams: streams, + } +} + +// New provides a cobra command wrapping options for "clusteragent" sub command +func New(streams genericclioptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "nifiusergroup [subcommand] [flags]", + } + + cmd.AddCommand(get.New(streams)) + + o := newOptions(streams) + o.configFlags.AddFlags(cmd.Flags()) + + return cmd +} diff --git a/config/crd/bases/nifi.konpyutaika.com_nificonnections.yaml b/config/crd/bases/nifi.konpyutaika.com_nificonnections.yaml new file mode 100644 index 0000000000..d49d338439 --- /dev/null +++ b/config/crd/bases/nifi.konpyutaika.com_nificonnections.yaml @@ -0,0 +1,138 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: nificonnections.nifi.konpyutaika.com +spec: + group: nifi.konpyutaika.com + names: + kind: NifiConnection + listKind: NifiConnectionList + plural: nificonnections + singular: nificonnection + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + backPressureDataSizeThreshold: + default: 1 GB + type: string + backPressureObjectThreshold: + default: 10000 + format: int64 + type: integer + bends: + items: + properties: + posX: + format: int64 + type: integer + posY: + format: int64 + type: integer + type: object + type: array + flowFileExpiration: + type: string + labelIndex: + format: int32 + type: integer + loadBalanceCompression: + default: DO_NOT_COMPRESS + enum: + 
- DO_NOT_COMPRESS + - COMPRESS_ATTRIBUTES_ONLY + - COMPRESS_ATTRIBUTES_AND_CONTENT + type: string + loadBalancePartitionAttribute: + type: string + loadBalanceStrategy: + default: DO_NOT_LOAD_BALANCE + enum: + - DO_NOT_LOAD_BALANCE + - PARTITION_BY_ATTRIBUTE + - ROUND_ROBIN + - SINGLE + type: string + prioritizers: + items: + enum: + - FirstInFirstOutPrioritizer + - NewestFlowFileFirstPrioritizer + - OldestFlowFileFirstPrioritizer + - PriorityAttributePrioritizer + type: string + type: array + type: object + destination: + properties: + name: + type: string + namespace: + type: string + subName: + type: string + type: + enum: + - dataflow + type: string + required: + - name + - type + type: object + source: + properties: + name: + type: string + namespace: + type: string + subName: + type: string + type: + enum: + - dataflow + type: string + required: + - name + - type + type: object + updateStrategy: + enum: + - drop + - drain + type: string + required: + - destination + - source + - updateStrategy + type: object + status: + properties: + connectionID: + type: string + state: + type: string + required: + - connectionID + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index bc29d6923a..bfa2ae97da 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/nifi.konpyutaika.com_nifiregistryclients.yaml - bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml +- bases/nifi.konpyutaika.com_nificonnections.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -22,6 +23,7 @@ patchesStrategicMerge: #- patches/webhook_in_nifiparametercontexts.yaml #- patches/webhook_in_nifiregistryclients.yaml #- patches/webhook_in_nifinodegroupautoscalers.yaml +#- patches/webhook_in_nificonnections.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, 
uncomment all the sections with [CERTMANAGER] prefix. @@ -33,6 +35,7 @@ patchesStrategicMerge: #- patches/cainjection_in_nifiparametercontexts.yaml #- patches/cainjection_in_nifiregistryclients.yaml #- patches/cainjection_in_nifinodegroupautoscalers.yaml +#- patches/cainjection_in_nificonnections.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. diff --git a/config/crd/patches/cainjection_in_nificonnections.yaml b/config/crd/patches/cainjection_in_nificonnections.yaml new file mode 100644 index 0000000000..ab3cd6d4d0 --- /dev/null +++ b/config/crd/patches/cainjection_in_nificonnections.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: nificonnections.nifi.konpyutaika.com diff --git a/config/crd/patches/webhook_in_nificonnections.yaml b/config/crd/patches/webhook_in_nificonnections.yaml new file mode 100644 index 0000000000..5f2b2e2227 --- /dev/null +++ b/config/crd/patches/webhook_in_nificonnections.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: nificonnections.nifi.konpyutaika.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/rbac/nificonnection_editor_role.yaml b/config/rbac/nificonnection_editor_role.yaml new file mode 100644 index 0000000000..5ffdc30a22 --- /dev/null +++ b/config/rbac/nificonnection_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit nificonnections. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nificonnection-editor-role +rules: +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections/status + verbs: + - get diff --git a/config/rbac/nificonnection_viewer_role.yaml b/config/rbac/nificonnection_viewer_role.yaml new file mode 100644 index 0000000000..4c5571a5ef --- /dev/null +++ b/config/rbac/nificonnection_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view nificonnections. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: nificonnection-viewer-role +rules: +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections + verbs: + - get + - list + - watch +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 1a013c10d0..cdd459089c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -146,6 +146,32 @@ rules: - get - patch - update +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections/finalizers + verbs: + - update +- apiGroups: + - nifi.konpyutaika.com + resources: + - nificonnections/status + verbs: + - get + - patch + - update - apiGroups: - nifi.konpyutaika.com resources: diff --git a/config/samples/nifi_v1alpha1_nificonnection.yaml b/config/samples/nifi_v1alpha1_nificonnection.yaml new file mode 100644 index 0000000000..0768fe7642 --- /dev/null +++ b/config/samples/nifi_v1alpha1_nificonnection.yaml @@ -0,0 +1,55 @@ +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiConnection +metadata: + name: nificonnection-sample +spec: + # the Source component of the connection. 
+ source: + # the name of the source component. + name: input + # the namespace of the source component. + namespace: nifikop + # the name of the source sub component (e.g. queue or port name). + subName: output + # the type of the source component (e.g. nifidataflow). + type: dataflow + # the Destination component of the connection. + destination: + # the name of the destination component. + name: output + # the namespace of the destination component. + namespace: nifikop + # the name of the destination sub component (e.g. queue or port name). + subName: input + # the type of the destination component (e.g. nifidataflow). + type: dataflow + # the Configuration of the connection. + configuration: + # the maximum amount of time an object may be in the flow before it will be automatically aged out of the flow. + flowFileExpiration: 1 hour + # the maximum data size of objects that can be queued before back pressure is applied. + backPressureDataSizeThreshold: 100 GB + # the maximum number of objects that can be queued before back pressure is applied. + backPressureObjectThreshold: 10000 + # how to load balance the data in this Connection across the nodes in the cluster. + loadBalanceStrategy: PARTITION_BY_ATTRIBUTE + # the FlowFile Attribute to use for determining which node a FlowFile will go to. + loadBalancePartitionAttribute: partition_attribute + # whether or not data should be compressed when being transferred between nodes in the cluster. + loadBalanceCompression: DO_NOT_COMPRESS + # the comparators used to prioritize the queue. + prioritizers: + - NewestFlowFileFirstPrioritizer + - FirstInFirstOutPrioritizer + # the index of the bend point where to place the connection label. + labelIndex: 0 + # the bend points on the connection. + bends: + - posX: 550 + posY: 550 + - posX: 550 + posY: 440 + - posX: 550 + posY: 88 + # describes the way the operator will deal with data when a connection will be updated: drop or drain.
+ updateStrategy: drain diff --git a/controllers/controller_common.go b/controllers/controller_common.go index dfb9958190..4381a47b27 100644 --- a/controllers/controller_common.go +++ b/controllers/controller_common.go @@ -2,10 +2,12 @@ package controllers import ( "fmt" - "github.com/konpyutaika/nifikop/api/v1" "strings" "time" + v1 "github.com/konpyutaika/nifikop/api/v1" + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/go-logr/logr" "github.com/konpyutaika/nifikop/pkg/util/clientconfig" "go.uber.org/zap" @@ -166,6 +168,17 @@ func GetUserRefNamespace(ns string, ref v1.UserReference) string { return userNamespace } +// GetComponentRefNamespace returns the expected namespace for a NiFi component +// referenced by a NifiConnection CR. It takes the namespace of the CR as the first +// argument and the reference itself as the second. +func GetComponentRefNamespace(ns string, ref v1alpha1.ComponentReference) string { + componentNamespace := ref.Namespace + if componentNamespace == "" { + return ns + } + return componentNamespace +} + func GetLogConstructor(mgr manager.Manager, obj runtime.Object) (func(*reconcile.Request) logr.Logger, error) { // Retrieve the GVK from the object we're reconciling diff --git a/controllers/controller_common_test.go b/controllers/controller_common_test.go index 1b21a9d8f2..91d35e86cf 100644 --- a/controllers/controller_common_test.go +++ b/controllers/controller_common_test.go @@ -2,11 +2,13 @@ package controllers import ( "errors" - "github.com/konpyutaika/nifikop/api/v1" "reflect" "testing" "time" + v1 "github.com/konpyutaika/nifikop/api/v1" + "github.com/konpyutaika/nifikop/api/v1alpha1" + "go.uber.org/zap" "github.com/konpyutaika/nifikop/pkg/errorfactory" @@ -45,6 +47,76 @@ func TestGetClusterRefNamespace(t *testing.T) { } } +func TestGetRegistryClientRefNamespace(t *testing.T) { + ns := "test-namespace" + ref := v1.RegistryClientReference{ + Name: "test-cluster", + } + if refNS := GetRegistryClientRefNamespace(ns, ref);
refNS != "test-namespace" { + t.Error("Expected to get 'test-namespace', got:", refNS) + } + ref.Namespace = "another-namespace" + if refNS := GetRegistryClientRefNamespace(ns, ref); refNS != "another-namespace" { + t.Error("Expected to get 'another-namespace', got:", refNS) + } +} + +func TestGetParameterContextRefNamespace(t *testing.T) { + ns := "test-namespace" + ref := v1.ParameterContextReference{ + Name: "test-cluster", + } + if refNS := GetParameterContextRefNamespace(ns, ref); refNS != "test-namespace" { + t.Error("Expected to get 'test-namespace', got:", refNS) + } + ref.Namespace = "another-namespace" + if refNS := GetParameterContextRefNamespace(ns, ref); refNS != "another-namespace" { + t.Error("Expected to get 'another-namespace', got:", refNS) + } +} + +func TestGetSecretRefNamespace(t *testing.T) { + ns := "test-namespace" + ref := v1.SecretReference{ + Name: "test-cluster", + } + if refNS := GetSecretRefNamespace(ns, ref); refNS != "test-namespace" { + t.Error("Expected to get 'test-namespace', got:", refNS) + } + ref.Namespace = "another-namespace" + if refNS := GetSecretRefNamespace(ns, ref); refNS != "another-namespace" { + t.Error("Expected to get 'another-namespace', got:", refNS) + } +} + +func TestGetUserRefNamespace(t *testing.T) { + ns := "test-namespace" + ref := v1.UserReference{ + Name: "test-cluster", + } + if refNS := GetUserRefNamespace(ns, ref); refNS != "test-namespace" { + t.Error("Expected to get 'test-namespace', got:", refNS) + } + ref.Namespace = "another-namespace" + if refNS := GetUserRefNamespace(ns, ref); refNS != "another-namespace" { + t.Error("Expected to get 'another-namespace', got:", refNS) + } +} + +func TestGetComponentRefNamespace(t *testing.T) { + ns := "test-namespace" + ref := v1alpha1.ComponentReference{ + Name: "test-cluster", + } + if refNS := GetComponentRefNamespace(ns, ref); refNS != "test-namespace" { + t.Error("Expected to get 'test-namespace', got:", refNS) + } + ref.Namespace = "another-namespace" + 
if refNS := GetComponentRefNamespace(ns, ref); refNS != "another-namespace" { + t.Error("Expected to get 'another-namespace', got:", refNS) + } +} + func TestClusterLabelString(t *testing.T) { cluster := &v1.NifiCluster{} cluster.Name = "test-cluster" diff --git a/controllers/nificluster_controller.go b/controllers/nificluster_controller.go index 4d8ac78bbd..add356bcf4 100644 --- a/controllers/nificluster_controller.go +++ b/controllers/nificluster_controller.go @@ -43,8 +43,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var clusterFinalizer = "nificlusters.nifi.konpyutaika.com/finalizer" -var clusterUsersFinalizer = "nificlusters.nifi.konpyutaika.com/users" +var clusterFinalizer string = fmt.Sprintf("nificlusters.%s/finalizer", v1.GroupVersion.Group) +var clusterUsersFinalizer string = fmt.Sprintf("nificlusters.%s/users", v1.GroupVersion.Group) // NifiClusterReconciler reconciles a NifiCluster object type NifiClusterReconciler struct { diff --git a/controllers/nificonnection_controller.go b/controllers/nificonnection_controller.go new file mode 100644 index 0000000000..cb7715719b --- /dev/null +++ b/controllers/nificonnection_controller.go @@ -0,0 +1,1028 @@ +/* +Copyright 2020. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + + "emperror.dev/errors" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/banzaicloud/k8s-objectmatcher/patch" + v1 "github.com/konpyutaika/nifikop/api/v1" + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/clientwrappers/connection" + "github.com/konpyutaika/nifikop/pkg/clientwrappers/dataflow" + "github.com/konpyutaika/nifikop/pkg/errorfactory" + "github.com/konpyutaika/nifikop/pkg/k8sutil" + "github.com/konpyutaika/nifikop/pkg/nificlient/config" + "github.com/konpyutaika/nifikop/pkg/util" + "github.com/konpyutaika/nifikop/pkg/util/clientconfig" + nifiutil "github.com/konpyutaika/nifikop/pkg/util/nifi" + "github.com/konpyutaika/nigoapi/pkg/nifi" +) + +var connectionFinalizer string = fmt.Sprintf("nificonnections.%s/finalizer", v1alpha1.GroupVersion.Group) + +// NifiConnectionReconciler reconciles a NifiConnection object +type NifiConnectionReconciler struct { + client.Client + Log zap.Logger + Scheme *runtime.Scheme + Recorder record.EventRecorder + RequeueInterval int + RequeueOffset int +} + +//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nificonnections,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nificonnections/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=nifi.konpyutaika.com,resources=nificonnections/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.1/pkg/reconcile +func (r *NifiConnectionReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + interval := util.GetRequeueInterval(r.RequeueInterval, r.RequeueOffset) + var err error + + // Fetch the NifiConnection instance + instance := &v1alpha1.NifiConnection{} + if err = r.Client.Get(ctx, req.NamespacedName, instance); err != nil { + if apierrors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + return Reconciled() + } + // Error reading the object - requeue the request. + return RequeueWithError(r.Log, err.Error(), err) + } + + // Get the last configuration viewed by the operator. + o, _ := patch.DefaultAnnotator.GetOriginalConfiguration(instance) + // Create it if not exist. + if o == nil { + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiConnection "+instance.Name, err) + } + o, _ = patch.DefaultAnnotator.GetOriginalConfiguration(instance) + } + + // Get the last NiFiCluster viewed by the operator. + cr, _ := k8sutil.GetAnnotation(nifiutil.LastAppliedClusterAnnotation, instance) + // Create it if not exist. 
+ if cr == nil { + jsonResource, err := json.Marshal(v1.ClusterReference{}) + if err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + + if err := k8sutil.SetAnnotation(nifiutil.LastAppliedClusterAnnotation, instance, jsonResource); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiConnection "+instance.Name, err) + } + cr, _ = patch.DefaultAnnotator.GetOriginalConfiguration(instance) + } + + // Check if the source or the destination changed + original := &v1alpha1.NifiConnection{} + originalClusterRef := &v1.ClusterReference{} + current := instance.DeepCopy() + json.Unmarshal(o, original) + json.Unmarshal(cr, originalClusterRef) + + // Validate component + if !instance.Spec.Configuration.IsValid() { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ConfigurationInvalid", + fmt.Sprintf("Failed to validate the connection configuration: %s in %s of type %s", + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type)) + return RequeueWithError(r.Log, "failed to validate the configuration of connection "+instance.Name, err) + } + + // Retrieve the namespace of the source component + instance.Spec.Source.Namespace = GetComponentRefNamespace(instance.Namespace, instance.Spec.Source) + // If the source component is invalid, requeue with error + if !instance.Spec.Source.IsValid() { + r.Recorder.Event(instance, corev1.EventTypeWarning, "SourceInvalid", + fmt.Sprintf("Failed to validate the source component: %s in %s of type %s", + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type)) + return RequeueWithError(r.Log, "failed to validate source component "+instance.Spec.Source.Name, err) + } + + // Retrieve the namespace of the destination 
component
+ instance.Spec.Destination.Namespace = GetComponentRefNamespace(instance.Namespace, instance.Spec.Destination)
+ // If the destination component is invalid, requeue with error
+ if !instance.Spec.Destination.IsValid() {
+ r.Recorder.Event(instance, corev1.EventTypeWarning, "DestinationInvalid",
+ fmt.Sprintf("Failed to validate the destination component: %s in %s of type %s",
+ instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type))
+ return RequeueWithError(r.Log, "failed to validate destination component "+instance.Spec.Destination.Name, err)
+ }
+
+ // Check if the 2 components are in the same NifiCluster and retrieve it
+ currentClusterRef, err := r.RetrieveNifiClusterRef(instance.Spec.Source, instance.Spec.Destination)
+ if err != nil {
+ r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError",
+ fmt.Sprintf("Failed to determine the cluster of the connection between %s in %s of type %s and %s in %s of type %s",
+ instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type,
+ instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type))
+ return RequeueWithError(r.Log, "failed to determine the cluster of the connection "+instance.Name, err)
+ }
+
+ // Get the client config manager associated to the cluster ref.
+ clusterRef := *originalClusterRef
+ // Set the clusterRef to the current one if the original one is empty (= new resource)
+ if clusterRef.Name == "" && clusterRef.Namespace == "" {
+ clusterRef = *currentClusterRef
+ }
+
+ // In case the cluster reference changed.
+ if !v1.ClusterRefsEquals([]v1.ClusterReference{clusterRef, *currentClusterRef}) { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect + + // Generate the connect object + configManager := config.GetClientConfigManager(r.Client, clusterRef) + if clusterConnect, err = configManager.BuildConnect(); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, current); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer", err) + } + return Reconciled() + } + + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + } + // Generate the client configuration. + clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster: %s in %s", + clusterRef.Name, clusterRef.Namespace)) + + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady(r.Log) { + r.Log.Debug("Cluster is not ready yet, will wait until it is.", + zap.String("clusterName", clusterRef.Name), + zap.String("connection", instance.Name)) + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet for connection %s: %s in %s", + instance.Name, clusterRef.Name, clusterConnect.Id())) + } + + // Delete the resource on the previous cluster. 
+ err := r.DeleteConnection(ctx, clientConfig, original, instance) + if err != nil { + switch errors.Cause(err).(type) { + // If the connection is still deleting, requeue + case errorfactory.NifiConnectionDeleting: + r.Recorder.Event(instance, corev1.EventTypeWarning, "Deleting", + fmt.Sprintf("Deleting the connection %s between %s in %s of type %s and %s in %s of type %s", + original.Name, + original.Spec.Source.Name, original.Spec.Source.Namespace, original.Spec.Source.Type, + original.Spec.Destination.Name, original.Spec.Destination.Namespace, original.Spec.Destination.Type)) + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + // If error during deletion, requeue with error + default: + r.Recorder.Event(instance, corev1.EventTypeWarning, "DeleteError", + fmt.Sprintf("Failed to delete the connection %s between %s in %s of type %s and %s in %s of type %s", + original.Name, + original.Spec.Source.Name, original.Spec.Source.Namespace, original.Spec.Source.Type, + original.Spec.Destination.Name, original.Spec.Destination.Namespace, original.Spec.Destination.Type)) + return RequeueWithError(r.Log, "failed to delete NifiConnection "+instance.Name, err) + } + } + + r.Recorder.Event(instance, corev1.EventTypeWarning, "Deleted", + fmt.Sprintf("The connection %s between %s in %s of type %s and %s in %s of type %s has been deleted", + original.Name, + original.Spec.Source.Name, original.Spec.Source.Namespace, original.Spec.Source.Type, + original.Spec.Destination.Name, original.Spec.Destination.Namespace, original.Spec.Destination.Type)) + + // Update the last view configuration to the current one. 
+ clusterRefJsonResource, err := json.Marshal(v1.ClusterReference{}) + if err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + if err := k8sutil.SetAnnotation(nifiutil.LastAppliedClusterAnnotation, instance, clusterRefJsonResource); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + + // Update last-applied annotation + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiConnection "+instance.Name, err) + } + + return RequeueAfter(interval) + } + + // LookUp component + // Source lookup + sourceComponent := &v1alpha1.ComponentInformation{} + if instance.Spec.Source.Type == v1alpha1.ComponentDataflow { + sourceComponent, err = r.GetDataflowComponentInformation(instance.Spec.Source, true) + } + + // If the source cannot be found, requeue with error + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "SourceNotFound", + fmt.Sprintf("Failed to retrieve source component information: %s in %s of type %s", + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type)) + return RequeueWithError(r.Log, "failed to retrieve source component "+instance.Spec.Source.Name, err) + } + + // Destination lookup + destinationComponent := &v1alpha1.ComponentInformation{} + if instance.Spec.Source.Type == v1alpha1.ComponentDataflow { + destinationComponent, err = r.GetDataflowComponentInformation(instance.Spec.Destination, false) + } + + // If the destination cannot be found, requeue with error + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "DestinationNotFound", + fmt.Sprintf("Failed to retrieve destination component information: %s in %s of type %s", + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + return RequeueWithError(r.Log, "failed to 
retrieve destination component "+instance.Spec.Destination.Name, err) + } + + // Check if the 2 components are on the same level in the NiFi canvas + if sourceComponent.ParentGroupId != destinationComponent.ParentGroupId { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ParentGroupIdError", + fmt.Sprintf("Failed to match parent group id from %s in %s of type %s to %s in %s of type %s", + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + return RequeueWithError(r.Log, "failed to match parent group id", err) + } + + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect + + // Generate the connect object + configManager := config.GetClientConfigManager(r.Client, clusterRef) + if clusterConnect, err = configManager.BuildConnect(); err != nil { + // This shouldn't trigger anymore, but leaving it here as a safetybelt + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + r.Log.Info("Cluster is already gone, there is nothing we can do") + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer", err) + } + return Reconciled() + } + + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to lookup referenced cluster", err) + } + + // Generate the client configuration. 
+ clientConfig, err = configManager.BuildConfig() + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", + fmt.Sprintf("Failed to create HTTP client for the referenced cluster: %s in %s", + clusterRef.Name, clusterRef.Namespace)) + // the cluster is gone, so just remove the finalizer + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + if err = r.removeFinalizer(ctx, instance); err != nil { + return RequeueWithError(r.Log, fmt.Sprintf("failed to remove finalizer from NifiConnection %s", instance.Name), err) + } + return Reconciled() + } + // the cluster does not exist - should have been caught pre-flight + return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) + } + + // Check if marked for deletion and if so run finalizers + if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { + return r.checkFinalizers(ctx, instance, clientConfig) + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady(r.Log) { + r.Log.Debug("Cluster is not ready yet, will wait until it is.", + zap.String("clusterName", clusterRef.Name), + zap.String("connection", instance.Name)) + r.Recorder.Event(instance, corev1.EventTypeNormal, "ReferenceClusterNotReady", + fmt.Sprintf("The referenced cluster is not ready yet for connection %s: %s in %s", + instance.Name, clusterRef.Name, clusterConnect.Id())) + + // the cluster does not exist - should have been caught pre-flight + return RequeueAfter(interval) + } + + r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", + fmt.Sprintf("Reconciling connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + + // Check if the connection already exists + existing, err := connection.ConnectionExist(instance, 
clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failure checking for existing connection named "+instance.Name, err) + } + + // If the connection does not exist, create it + if !existing { + connectionStatus, err := connection.CreateConnection(instance, sourceComponent, destinationComponent, clientConfig) + if err != nil { + r.Recorder.Event(instance, corev1.EventTypeWarning, "CreationFailed", + fmt.Sprintf("Creation failed connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + return RequeueWithError(r.Log, "failure creating connection "+instance.Name, err) + } + + // Update the last view configuration to the current one. + if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + + // Update the last view configuration to the current one. 
+ clusterRefJsonResource, err := json.Marshal(clusterRef) + if err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + + if err := k8sutil.SetAnnotation(nifiutil.LastAppliedClusterAnnotation, instance, clusterRefJsonResource); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for connection "+instance.Name, err) + } + // Update last-applied annotation + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiConnection "+instance.Name, err) + } + + // Set connection status + if instance.Status.State == v1alpha1.ConnectionStateOutOfSync { + connectionStatus.State = v1alpha1.ConnectionStateOutOfSync + } else { + connectionStatus.State = v1alpha1.ConnectionStateCreated + } + instance.Status = *connectionStatus + if err := r.Client.Status().Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update status for NifiConnection "+instance.Name, err) + } + + r.Recorder.Event(instance, corev1.EventTypeNormal, "Created", + fmt.Sprintf("Created connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + } + + // Ensure finalizer for cleanup on deletion + if !util.StringSliceContains(instance.GetFinalizers(), connectionFinalizer) { + r.Log.Info("Adding Finalizer for NifiConnection") + instance.SetFinalizers(append(instance.GetFinalizers(), connectionFinalizer)) + } + + // Push any changes + if instance, err = r.updateAndFetchLatest(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiConnection "+current.Name, err) + } + + // If the connection is out of sync, sync it + if instance.Status.State == v1alpha1.ConnectionStateOutOfSync { + status, 
err := connection.SyncConnectionConfig(instance, sourceComponent, destinationComponent, clientConfig) + if status != nil { + instance.Status = *status + if err := r.Client.Status().Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update status for NifiConnection "+instance.Name, err) + } + } + if err != nil { + switch errors.Cause(err).(type) { + // If the connection is still syncing, requeue + case errorfactory.NifiConnectionSyncing: + r.Log.Debug("Connection syncing", + zap.String("connection", instance.Name)) + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + // If the connection needs to be deleted, delete it + case errorfactory.NifiConnectionDeleting: + err = r.DeleteConnection(ctx, clientConfig, original, instance) + if err != nil { + switch errors.Cause(err).(type) { + // If the connection is still deleting, requeue + case errorfactory.NifiConnectionDeleting: + r.Recorder.Event(instance, corev1.EventTypeWarning, "Deleting", + fmt.Sprintf("Deleting the connection %s between %s in %s of type %s and %s in %s of type %s", + original.Name, + original.Spec.Source.Name, original.Spec.Source.Namespace, original.Spec.Source.Type, + original.Spec.Destination.Name, original.Spec.Destination.Namespace, original.Spec.Destination.Type)) + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + // If error during deletion, requeue with error + default: + r.Recorder.Event(instance, corev1.EventTypeWarning, "DeleteError", + fmt.Sprintf("Failed to delete the connection %s between %s in %s of type %s and %s in %s of type %s", + original.Name, + original.Spec.Source.Name, original.Spec.Source.Namespace, original.Spec.Source.Type, + original.Spec.Destination.Name, original.Spec.Destination.Namespace, original.Spec.Destination.Type)) + return RequeueWithError(r.Log, "failed to delete NifiConnection "+instance.Name, err) + } + // If the connection has been deleted, requeue + } else { + r.Recorder.Event(instance, 
corev1.EventTypeWarning, "Deleted", + fmt.Sprintf("The connection %s between %s in %s of type %s and %s in %s of type %s has been deleted", + original.Name, + original.Spec.Source.Name, original.Spec.Source.Namespace, original.Spec.Source.Type, + original.Spec.Destination.Name, original.Spec.Destination.Namespace, original.Spec.Destination.Type)) + + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + } + // If error during syncing, requeue with error + default: + r.Recorder.Event(instance, corev1.EventTypeWarning, "SynchronizingFailed", + fmt.Sprintf("Syncing connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + return RequeueWithError(r.Log, "failed to sync NifiConnection "+instance.Name, err) + } + } + + // Update the last view configuration to the current one. 
+ if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { + return RequeueWithError(r.Log, "could not apply last state to annotation for dataflow "+instance.Name, err) + } + // Update last-applied annotation + if err := r.Client.Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update NifiConnection "+instance.Name, err) + } + + // Update the state of the connection to indicate that it is synced + instance.Status.State = v1alpha1.ConnectionStateInSync + if err := r.Client.Status().Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update status for NifiConnection "+instance.Name, err) + } + + r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronized", + fmt.Sprintf("Synchronized connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + } + + // Check if the connection is out of sync + isOutOfSink, err := connection.IsOutOfSyncConnection(instance, sourceComponent, destinationComponent, clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failed to check sync for NifiConnection "+instance.Name, err) + } + + // If the connection is out of sync, update the state of the connection to indicate it + if isOutOfSink { + instance.Status.State = v1alpha1.ConnectionStateOutOfSync + if err := r.Client.Status().Update(ctx, instance); err != nil { + return RequeueWithError(r.Log, "failed to update status for NifiConnection "+instance.Name, err) + } + return RequeueAfter(interval / 3) + } + + // Ensure NifiConnection label + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { + return RequeueWithError(r.Log, "failed to ensure NifiConnection label on connection", err) + } + + // Push any changes + if instance, err = 
r.updateAndFetchLatest(ctx, instance); err != nil {
+ return RequeueWithError(r.Log, "failed to update NifiConnection", err)
+ }
+
+ r.Log.Debug("Ensured Connection",
+ zap.String("sourceName", instance.Spec.Source.Name),
+ zap.String("sourceNamespace", instance.Spec.Source.Namespace),
+ zap.String("sourceType", string(instance.Spec.Source.Type)),
+ zap.String("destinationName", instance.Spec.Destination.Name),
+ zap.String("destinationNamespace", instance.Spec.Destination.Namespace),
+ zap.String("destinationType", string(instance.Spec.Destination.Type)),
+ zap.String("connection", instance.Name))
+
+ r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciled",
+ fmt.Sprintf("Successfully reconciled connection %s between %s in %s of type %s and %s in %s of type %s",
+ instance.Name,
+ instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type,
+ instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type))
+
+ return RequeueAfter(interval / 3)
+}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *NifiConnectionReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&v1alpha1.NifiConnection{}). 
+ Complete(r) +} + +// Set the label specifying the cluster used by the NifiConnection +func (r *NifiConnectionReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, + connection *v1alpha1.NifiConnection) (*v1alpha1.NifiConnection, error) { + + labels := ApplyClusterReferenceLabel(cluster, connection.GetLabels()) + if !reflect.DeepEqual(labels, connection.GetLabels()) { + connection.SetLabels(labels) + return r.updateAndFetchLatest(ctx, connection) + } + return connection, nil +} + +// Update the NifiConnection resource and return the latest version of it +func (r *NifiConnectionReconciler) updateAndFetchLatest(ctx context.Context, + connection *v1alpha1.NifiConnection) (*v1alpha1.NifiConnection, error) { + + typeMeta := connection.TypeMeta + err := r.Client.Update(ctx, connection) + if err != nil { + return nil, err + } + connection.TypeMeta = typeMeta + return connection, nil +} + +// Check if the finalizer is present on the NifiConnection resource +func (r *NifiConnectionReconciler) checkFinalizers( + ctx context.Context, + connection *v1alpha1.NifiConnection, + config *clientconfig.NifiConfig) (reconcile.Result, error) { + r.Log.Info(fmt.Sprintf("NiFi connection %s is marked for deletion", connection.Name)) + var err error + if util.StringSliceContains(connection.GetFinalizers(), connectionFinalizer) { + if err = r.finalizeNifiConnection(ctx, connection, config); err != nil { + return RequeueWithError(r.Log, "failed to finalize connection", err) + } + if err = r.removeFinalizer(ctx, connection); err != nil { + return RequeueWithError(r.Log, "failed to remove finalizer from connection", err) + } + } + return Reconciled() +} + +// Remove the finalizer on the NifiConnection resource +func (r *NifiConnectionReconciler) removeFinalizer(ctx context.Context, connection *v1alpha1.NifiConnection) error { + r.Log.Info("Removing finalizer for NifiConnection", + zap.String("connection", connection.Name)) + 
connection.SetFinalizers(util.StringSliceRemove(connection.GetFinalizers(), connectionFinalizer)) + _, err := r.updateAndFetchLatest(ctx, connection) + return err +} + +// Delete the connection to finalize the NifiConnection +func (r *NifiConnectionReconciler) finalizeNifiConnection( + ctx context.Context, + instance *v1alpha1.NifiConnection, + config *clientconfig.NifiConfig) error { + r.Log.Debug("Finalize the NifiConnection", + zap.String("connection", instance.Name)) + + exists, err := connection.ConnectionExist(instance, config) + if err != nil { + return err + } + + // Check if the connection still exists in NiFi + if exists { + r.Recorder.Event(instance, corev1.EventTypeNormal, "Removing", + fmt.Sprintf("Removing connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + + // Delete the connection + if err := r.DeleteConnection(ctx, config, instance, instance); err != nil { + return err + } + + r.Recorder.Event(instance, corev1.EventTypeNormal, "Removed", + fmt.Sprintf("Removed connection %s between %s in %s of type %s and %s in %s of type %s", + instance.Name, + instance.Spec.Source.Name, instance.Spec.Source.Namespace, instance.Spec.Source.Type, + instance.Spec.Destination.Name, instance.Spec.Destination.Namespace, instance.Spec.Destination.Type)) + + r.Log.Info("Connection deleted", + zap.String("connection", instance.Name)) + } + + return nil +} + +// Delete the connection +func (r *NifiConnectionReconciler) DeleteConnection(ctx context.Context, clientConfig *clientconfig.NifiConfig, + original *v1alpha1.NifiConnection, instance *v1alpha1.NifiConnection) error { + r.Log.Debug("Delete the connection", + zap.String("name", instance.Name), + zap.String("sourceName", original.Spec.Source.Name), + zap.String("sourceNamespace", 
original.Spec.Source.Namespace), + zap.String("sourceType", string(original.Spec.Source.Type)), + zap.String("destinationName", original.Spec.Destination.Name), + zap.String("destinationNamespace", original.Spec.Destination.Namespace), + zap.String("destinationType", string(original.Spec.Destination.Type))) + + // Check if the source component is a NifiDataflow + if original.Spec.Source.Type == v1alpha1.ComponentDataflow { + // Retrieve NifiDataflow information + sourceInstance, err := k8sutil.LookupNifiDataflow(r.Client, original.Spec.Source.Name, original.Spec.Source.Namespace) + if err != nil { + return err + } + + // Check is the NifiDataflow's update strategy is on drain + if sourceInstance.Spec.UpdateStrategy == v1.DrainStrategy { + // Check if the dataflow is empty + isEmpty, err := dataflow.IsDataflowEmpty(sourceInstance, clientConfig) + if err != nil { + return err + } + + // If the dataflow is empty, stop the output-port of the dataflow + if isEmpty { + if err := r.StopDataflowComponent(ctx, original.Spec.Source, true); err != nil { + return err + } + } + } + } + + // Check if the destination component is a NifiDataflow + if original.Spec.Destination.Type == v1alpha1.ComponentDataflow { + // Retrieve NifiDataflow information + destinationInstance, err := k8sutil.LookupNifiDataflow(r.Client, original.Spec.Destination.Name, original.Spec.Destination.Namespace) + if err != nil { + return err + } + + // If the NifiDataflow's update strategy is on drop and the NifiConnection's too, stop the input-port of the dataflow + if destinationInstance.Spec.UpdateStrategy == v1.DropStrategy && instance.Spec.UpdateStrategy == v1.DropStrategy { + if err := r.StopDataflowComponent(ctx, original.Spec.Destination, false); err != nil { + return err + } + } + + // Retrieve the connection information + connectionEntity, err := connection.GetConnectionInformation(instance, clientConfig) + if err != nil { + return err + } + if connectionEntity == nil { + return nil + } + + // If 
the source is stopped, the connection is not empty and the connections's update strategy is on drain: + // force the dataflow to stay started + if !connectionEntity.Component.Source.Running && + connectionEntity.Status.AggregateSnapshot.FlowFilesQueued != 0 && + instance.Spec.UpdateStrategy == v1.DrainStrategy { + if err := r.ForceStartDataflowComponent(ctx, original.Spec.Destination); err != nil { + return err + } + // If the source is stopped, the destination is running and the connection is empty: + // unforce the dataflow to stay started and stop the input-port of the dataflow + } else if !connectionEntity.Component.Source.Running && connectionEntity.Component.Destination.Running && + connectionEntity.Status.AggregateSnapshot.FlowFilesQueued == 0 { + if err := r.UnForceStartDataflowComponent(ctx, original.Spec.Destination); err != nil { + return err + } + if err := r.StopDataflowComponent(ctx, original.Spec.Destination, false); err != nil { + return err + } + // If the source is stopped, the destination is stopped, the connection is not empty and the destination's update strategy is on drop: + // empty the connection + } else if !connectionEntity.Component.Source.Running && !connectionEntity.Component.Destination.Running && + connectionEntity.Status.AggregateSnapshot.FlowFilesQueued != 0 && destinationInstance.Spec.UpdateStrategy == v1.DropStrategy && + instance.Spec.UpdateStrategy == v1.DropStrategy { + if err := connection.DropConnectionFlowFiles(instance, clientConfig); err != nil { + return err + } + // If the source is stopped, the destination is stopped and the connection is empty: + // delete the connection, unstop the output-port of the source and unstop the input-port of th destination + } else if !connectionEntity.Component.Source.Running && !connectionEntity.Component.Destination.Running && + connectionEntity.Status.AggregateSnapshot.FlowFilesQueued == 0 { + if err := connection.DeleteConnection(instance, clientConfig); err != nil { + return err + } 
+ + // Check if the source component is a NifiDataflow + if original.Spec.Source.Type == v1alpha1.ComponentDataflow { + if err := r.UnStopDataflowComponent(ctx, original.Spec.Source, true); err != nil { + return err + } + } + + if err := r.UnStopDataflowComponent(ctx, original.Spec.Destination, false); err != nil { + return err + } + return nil + } + } + return errorfactory.NifiConnectionDeleting{} +} + +// Retrieve the clusterRef based on the source and the destination of the connection +func (r *NifiConnectionReconciler) RetrieveNifiClusterRef(src v1alpha1.ComponentReference, dst v1alpha1.ComponentReference) (*v1.ClusterReference, error) { + r.Log.Debug("Retrieve the cluster reference from the source and the destination", + zap.String("sourceName", src.Name), + zap.String("sourceNamespace", src.Namespace), + zap.String("sourceType", string(src.Type)), + zap.String("destinationName", dst.Name), + zap.String("destinationNamespace", dst.Namespace), + zap.String("destinationType", string(dst.Type))) + + var srcClusterRef = v1.ClusterReference{} + // Retrieve the source clusterRef from a NifiDataflow resource + if src.Type == v1alpha1.ComponentDataflow { + srcDataflow, err := k8sutil.LookupNifiDataflow(r.Client, src.Name, src.Namespace) + if err != nil { + return nil, err + } + + srcClusterRef = srcDataflow.Spec.ClusterRef + } + + var dstClusterRef = v1.ClusterReference{} + // Retrieve the destination clusterRef from a NifiDataflow resource + if dst.Type == v1alpha1.ComponentDataflow { + dstDataflow, err := k8sutil.LookupNifiDataflow(r.Client, dst.Name, dst.Namespace) + if err != nil { + return nil, err + } + + dstClusterRef = dstDataflow.Spec.ClusterRef + } + + // Check that the source and the destination reference the same cluster + if !v1.ClusterRefsEquals([]v1.ClusterReference{srcClusterRef, dstClusterRef}) { + return nil, errors.New(fmt.Sprintf("Source cluster %s in %s is different from Destination cluster %s in %s", + srcClusterRef.Name, srcClusterRef.Namespace, 
+ dstClusterRef.Name, dstClusterRef.Namespace)) + } + + return &srcClusterRef, nil +} + +// Retrieve port information from a NifiDataflow +func (r *NifiConnectionReconciler) GetDataflowComponentInformation(c v1alpha1.ComponentReference, isSource bool) (*v1alpha1.ComponentInformation, error) { + var portType string = "input" + if isSource { + portType = "output" + } + r.Log.Debug("Retrieve the dataflow port information", + zap.String("dataflowName", c.Name), + zap.String("dataflowNamespace", c.Namespace), + zap.String("portName", c.SubName), + zap.String("portType", portType)) + + instance, err := k8sutil.LookupNifiDataflow(r.Client, c.Name, c.Namespace) + if err != nil { + return nil, err + } else { + // Prepare cluster connection configurations + var clientConfig *clientconfig.NifiConfig + var clusterConnect clientconfig.ClusterConnect + + // Get the client config manager associated to the cluster ref. + clusterRef := instance.Spec.ClusterRef + clusterRef.Namespace = GetClusterRefNamespace(instance.Namespace, instance.Spec.ClusterRef) + configManager := config.GetClientConfigManager(r.Client, clusterRef) + + // Generate the connect object + if clusterConnect, err = configManager.BuildConnect(); err != nil { + return nil, err + } + + // Generate the client configuration. 
+ clientConfig, err = configManager.BuildConfig() + if err != nil { + return nil, err + } + + // Ensure the cluster is ready to receive actions + if !clusterConnect.IsReady(r.Log) { + return nil, errors.New(fmt.Sprintf("Cluster %s in %s not ready for dataflow %s in %s", clusterRef.Name, clusterRef.Namespace, instance.Name, instance.Namespace)) + } + + dataflowInformation, err := dataflow.GetDataflowInformation(instance, clientConfig) + if err != nil { + return nil, err + } + + // Error if the dataflow does not exist + if dataflowInformation == nil { + return nil, errors.New(fmt.Sprintf("Dataflow %s in %s does not exist in the cluster", instance.Name, instance.Namespace)) + } + + // Retrieve the ports + var ports = []nifi.PortEntity{} + if isSource { + ports = dataflowInformation.ProcessGroupFlow.Flow.OutputPorts + } else { + ports = dataflowInformation.ProcessGroupFlow.Flow.InputPorts + } + + // Error if no port exists in the dataflow + if len(ports) == 0 { + return nil, errors.New(fmt.Sprintf("No port available for Dataflow %s in %s", instance.Name, instance.Namespace)) + } + + // Search the targeted port + targetPort := nifi.PortEntity{} + foundTarget := false + for _, port := range ports { + if port.Component.Name == c.SubName { + targetPort = port + foundTarget = true + } + } + + // Error if the targeted port is not found + if !foundTarget { + return nil, errors.New(fmt.Sprintf("Port %s not found: %s in %s", c.SubName, instance.Name, instance.Namespace)) + } + + // Return all the information on the targetted port of the dataflow + information := &v1alpha1.ComponentInformation{ + Id: targetPort.Id, + Type: targetPort.Component.Type_, + GroupId: targetPort.Component.ParentGroupId, + ParentGroupId: dataflowInformation.ProcessGroupFlow.ParentGroupId, + ClusterRef: clusterRef, + } + return information, nil + } +} + +// Set the maintenance label to force the stop of a port +func (r *NifiConnectionReconciler) StopDataflowComponent(ctx context.Context, c 
v1alpha1.ComponentReference, isSource bool) error { + var portType string = "input" + if isSource { + portType = "output" + } + r.Log.Debug("Set label to stop the port of the dataflow", + zap.String("dataflowName", c.Name), + zap.String("dataflowNamespace", c.Namespace), + zap.String("portName", c.SubName), + zap.String("portType", portType)) + + // Retrieve K8S Dataflow object + instance, err := k8sutil.LookupNifiDataflow(r.Client, c.Name, c.Namespace) + instanceOriginal := instance.DeepCopy() + if err != nil { + return err + } else { + labels := instance.GetLabels() + + // Check that the label is not already set with a different value + if !isSource { + if label, ok := labels[nifiutil.StopInputPortLabel]; ok { + if label != c.SubName { + return errors.New(fmt.Sprintf("Label %s is already set on the NifiDataflow %s", nifiutil.StopInputPortLabel, instance.Name)) + } + } else { + labels[nifiutil.StopInputPortLabel] = c.SubName + instance.SetLabels(labels) + return r.Client.Patch(ctx, instance, client.MergeFrom(instanceOriginal)) + } + } else { + // Set the label + if label, ok := labels[nifiutil.StopOutputPortLabel]; ok { + if label != c.SubName { + return errors.New(fmt.Sprintf("Label %s is already set on the NifiDataflow %s", nifiutil.StopOutputPortLabel, instance.Name)) + } + } else { + labels[nifiutil.StopOutputPortLabel] = c.SubName + instance.SetLabels(labels) + return r.Client.Patch(ctx, instance, client.MergeFrom(instanceOriginal)) + } + } + } + return nil +} + +// Unset the maintenance label to force the stop of a port +func (r *NifiConnectionReconciler) UnStopDataflowComponent(ctx context.Context, c v1alpha1.ComponentReference, isSource bool) error { + r.Log.Debug("Unset label to stop the port of the dataflow", + zap.String("dataflowName", c.Name), + zap.String("dataflowNamespace", c.Namespace)) + + // Retrieve K8S Dataflow object + instance, err := k8sutil.LookupNifiDataflow(r.Client, c.Name, c.Namespace) + instanceOriginal := instance.DeepCopy() + if err 
!= nil { + return err + } else { + // Set the label + labels := instance.GetLabels() + + if !isSource { + // If the label is set with the correct value, delete it + if label, ok := labels[nifiutil.StopInputPortLabel]; ok { + if label == c.SubName { + delete(labels, nifiutil.StopInputPortLabel) + } + } + } else { + // If the label is set with the correct value, delete it + if label, ok := labels[nifiutil.StopOutputPortLabel]; ok { + if label == c.SubName { + delete(labels, nifiutil.StopOutputPortLabel) + } + } + } + + instance.SetLabels(labels) + return r.Client.Patch(ctx, instance, client.MergeFrom(instanceOriginal)) + } +} + +// Set the maintenance label to force the start of a dataflow +func (r *NifiConnectionReconciler) ForceStartDataflowComponent(ctx context.Context, c v1alpha1.ComponentReference) error { + r.Log.Debug("Set label to force the start of the dataflow", + zap.String("dataflowName", c.Name), + zap.String("dataflowNamespace", c.Namespace)) + + // Retrieve K8S Dataflow object + instance, err := k8sutil.LookupNifiDataflow(r.Client, c.Name, c.Namespace) + instanceOriginal := instance.DeepCopy() + if err != nil { + return err + } else { + labels := instance.GetLabels() + // Check that the label is not already set with a different value + if label, ok := labels[nifiutil.ForceStartLabel]; ok { + if label != "true" { + return errors.New(fmt.Sprintf("Label %s is already set on the NifiDataflow %s", nifiutil.StopInputPortLabel, instance.Name)) + } + } else { + // Set the label + labels[nifiutil.ForceStartLabel] = "true" + instance.SetLabels(labels) + return r.Client.Patch(ctx, instance, client.MergeFrom(instanceOriginal)) + } + } + return nil +} + +// Unset the maintenance label to force the start of a dataflow +func (r *NifiConnectionReconciler) UnForceStartDataflowComponent(ctx context.Context, c v1alpha1.ComponentReference) error { + r.Log.Debug("Unset label to force the start of the dataflow", + zap.String("dataflowName", c.Name), + 
zap.String("dataflowNamespace", c.Namespace)) + + // Retrieve K8S Dataflow object + instance, err := k8sutil.LookupNifiDataflow(r.Client, c.Name, c.Namespace) + instanceOriginal := instance.DeepCopy() + if err != nil { + return err + } else { + // Unset the label + labels := instance.GetLabels() + + delete(labels, nifiutil.ForceStartLabel) + + instance.SetLabels(labels) + return r.Client.Patch(ctx, instance, client.MergeFrom(instanceOriginal)) + } +} diff --git a/controllers/nifidataflow_controller.go b/controllers/nifidataflow_controller.go index 486205fef3..49bd1ef202 100644 --- a/controllers/nifidataflow_controller.go +++ b/controllers/nifidataflow_controller.go @@ -28,6 +28,8 @@ import ( "emperror.dev/errors" "github.com/banzaicloud/k8s-objectmatcher/patch" "github.com/konpyutaika/nifikop/pkg/clientwrappers/dataflow" + "github.com/konpyutaika/nifikop/pkg/clientwrappers/inputport" + "github.com/konpyutaika/nifikop/pkg/clientwrappers/outputport" "github.com/konpyutaika/nifikop/pkg/errorfactory" "github.com/konpyutaika/nifikop/pkg/k8sutil" "github.com/konpyutaika/nifikop/pkg/nificlient/config" @@ -39,12 +41,13 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/reconcile" + nifiutil "github.com/konpyutaika/nifikop/pkg/util/nifi" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) -var dataflowFinalizer = "nifidataflows.nifi.konpyutaika.com/finalizer" +var dataflowFinalizer string = fmt.Sprintf("nifidataflows.%s/finalizer", v1.GroupVersion.Group) // NifiDataflowReconciler reconciles a NifiDataflow object type NifiDataflowReconciler struct { @@ -84,6 +87,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return RequeueWithError(r.Log, err.Error(), err) } + patchInstance := client.MergeFrom(instance.DeepCopy()) // Get the last configuration viewed by the operator. 
o, _ := patch.DefaultAnnotator.GetOriginalConfiguration(instance) // Create it if not exist. @@ -91,7 +95,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(instance); err != nil { return RequeueWithError(r.Log, "could not apply last state to annotation for dataflow "+instance.Name, err) } - if err := r.Client.Update(ctx, instance); err != nil { + if err := r.Client.Patch(ctx, instance, patchInstance); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow "+instance.Name, err) } o, _ = patch.DefaultAnnotator.GetOriginalConfiguration(instance) @@ -100,6 +104,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request // Check if the cluster reference changed. original := &v1.NifiDataflow{} current := instance.DeepCopy() + patchCurrent := client.MergeFrom(current.DeepCopy()) json.Unmarshal(o, original) if !v1.ClusterRefsEquals([]v1.ClusterReference{original.Spec.ClusterRef, instance.Spec.ClusterRef}) { instance.Spec.ClusterRef = original.Spec.ClusterRef @@ -119,7 +124,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Dataflow is already gone, there is nothing we can do", zap.String("dataflow", instance.Name)) - if err = r.removeFinalizer(ctx, instance); err != nil { + if err = r.removeFinalizer(ctx, instance, patchInstance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer for dataflow "+instance.Name, err) } return Reconciled() @@ -146,7 +151,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { r.Log.Info("Dataflow context is already gone, there is nothing we can do", zap.String("dataflow", instance.Name)) - if err = r.removeFinalizer(ctx, instance); err != nil { + if err = r.removeFinalizer(ctx, instance, 
patchInstance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer for dataflow "+instance.Name, err) } return Reconciled() @@ -202,7 +207,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request r.Log.Info("Cluster is already gone, there is nothing we can do", zap.String("clusterName", clusterRef.Name), zap.String("dataflow", instance.Name)) - if err = r.removeFinalizer(ctx, instance); err != nil { + if err = r.removeFinalizer(ctx, instance, patchInstance); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer for dataflow "+instance.Name, err) } return Reconciled() @@ -213,7 +218,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { return RequeueWithError(r.Log, "could not apply last state to annotation for dataflow "+instance.Name, err) } - if err := r.Client.Update(ctx, current); err != nil { + if err := r.Client.Patch(ctx, current, patchCurrent); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow with updated NifiCluster reference "+instance.Name, err) } return RequeueAfter(interval) @@ -233,7 +238,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request r.Recorder.Event(instance, corev1.EventTypeWarning, "ReferenceClusterError", msg) // the cluster is gone, so just remove the finalizer if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - if err = r.removeFinalizer(ctx, instance); err != nil { + if err = r.removeFinalizer(ctx, instance, patchInstance); err != nil { return RequeueWithError(r.Log, fmt.Sprintf("failed to remove finalizer from NifiDataflow %s", instance.Name), err) } return Reconciled() @@ -242,9 +247,85 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return RequeueWithError(r.Log, "failed to create HTTP client the for referenced cluster", err) } + // Maintenance operation(s) 
via label + // Check if maintenance operation is needed + var maintenanceOpNeeded bool = false + for labelKey := range instance.Labels { + if labelKey == nifiutil.StopInputPortLabel || labelKey == nifiutil.StopOutputPortLabel || + labelKey == nifiutil.ForceStartLabel || labelKey == nifiutil.ForceStopLabel { + maintenanceOpNeeded = true + } + } + + // Maintenance operation is needed + if maintenanceOpNeeded { + r.Recorder.Event(instance, corev1.EventTypeNormal, "MaintenanceOperationInProgress", + fmt.Sprintf("Syncing dataflow %s based on flow {bucketId : %s, flowId: %s, version: %s}", + instance.Name, instance.Spec.BucketId, + instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) + + dataflowInformation, err := dataflow.GetDataflowInformation(instance, clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failed to get NifiDataflow information", err) + } else { + if labelValue, ok := instance.Labels[nifiutil.ForceStopLabel]; ok { + // Stop dataflow operation + if labelValue == "true" { + err = dataflow.UnscheduleDataflow(instance, clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failed to stop dataflow "+instance.Name, err) + } + } + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + } else if labelValue, ok := instance.Labels[nifiutil.ForceStartLabel]; ok { + // Start dataflow operation + if labelValue == "true" { + err = dataflow.ScheduleDataflow(instance, clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failed to start dataflow "+instance.Name, err) + } + } + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + } else { + if labelValue, ok := instance.Labels[nifiutil.StopInputPortLabel]; ok { + // Stop input port operation + for _, port := range dataflowInformation.ProcessGroupFlow.Flow.InputPorts { + if port.Component.Name == labelValue { + _, err := inputport.StopPort(port, clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failed to stop input 
port "+labelValue, err) + } + } + } + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + } + if labelValue, ok := instance.Labels[nifiutil.StopOutputPortLabel]; ok { + // Stop output port operation + for _, port := range dataflowInformation.ProcessGroupFlow.Flow.OutputPorts { + if port.Component.Name == labelValue { + _, err := outputport.StopPort(port, clientConfig) + if err != nil { + return RequeueWithError(r.Log, "failed to stop output port "+labelValue, err) + } + } + } + return reconcile.Result{ + RequeueAfter: interval / 3, + }, nil + } + } + } + } + // Check if marked for deletion and if so run finalizers if k8sutil.IsMarkedForDeletion(instance.ObjectMeta) { - return r.checkFinalizers(ctx, instance, clientConfig) + return r.checkFinalizers(ctx, instance, clientConfig, patchInstance) } // Ensure the cluster is ready to receive actions @@ -272,7 +353,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if err := patch.DefaultAnnotator.SetLastAppliedAnnotation(current); err != nil { return RequeueWithError(r.Log, "could not apply last state to annotation for dataflow "+instance.Name, err) } - if err := r.Client.Update(ctx, current); err != nil { + if err := r.Client.Patch(ctx, current, patchCurrent); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow "+instance.Name, err) } return RequeueAfter(interval) @@ -283,8 +364,8 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request return Reconciled() } - r.Recorder.Event(instance, corev1.EventTypeWarning, "Reconciling", - fmt.Sprintf("Reconciling failed dataflow %s based on flow {bucketId : %s, flowId: %s, version: %s}", + r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciling", + fmt.Sprintf("Reconciling dataflow %s based on flow {bucketId : %s, flowId: %s, version: %s}", instance.Name, instance.Spec.BucketId, instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) @@ -314,8 +395,8 
@@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request instance.Status = *processGroupStatus instance.Status.State = v1.DataflowStateCreated - if err := r.updateStatus(ctx, instance, current.Status); err != nil { - return RequeueWithError(r.Log, "failed to update status for NifiDataflow "+instance.Name, err) + if err := r.patchStatus(ctx, instance, patchInstance, current.Status); err != nil { + return RequeueWithError(r.Log, "failed to patch status for NifiDataflow "+instance.Name, err) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Created", @@ -333,7 +414,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Push any changes - if instance, err = r.updateAndFetchLatest(ctx, instance); err != nil { + if instance, err = r.updateAndFetchLatest(ctx, instance, patchInstance); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow "+current.Name, err) } @@ -351,8 +432,8 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request status, err := dataflow.SyncDataflow(instance, clientConfig, registryClient, parameterContext) if status != nil { instance.Status = *status - if err := r.updateStatus(ctx, instance, current.Status); err != nil { - return RequeueWithError(r.Log, "failed to update status for NifiDataflow "+instance.Name, err) + if err := r.patchStatus(ctx, instance, patchInstance, current.Status); err != nil { + return RequeueWithError(r.Log, "failed to patch status for NifiDataflow "+instance.Name, err) } } if err != nil { @@ -387,8 +468,8 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } instance.Status.State = v1.DataflowStateInSync - if err := r.updateStatus(ctx, instance, current.Status); err != nil { - return RequeueWithError(r.Log, "failed to update status for NifiDataflow "+instance.Name, err) + if err := r.patchStatus(ctx, instance, patchInstance, current.Status); err != nil { + return 
RequeueWithError(r.Log, "failed to patch status for NifiDataflow "+instance.Name, err) } r.Recorder.Event(instance, corev1.EventTypeNormal, "Synchronized", @@ -405,8 +486,8 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if isOutOfSink { instance.Status.State = v1.DataflowStateOutOfSync - if err := r.updateStatus(ctx, instance, current.Status); err != nil { - return RequeueWithError(r.Log, "failed to update status for NifiDataflow "+instance.Name, err) + if err := r.patchStatus(ctx, instance, patchInstance, current.Status); err != nil { + return RequeueWithError(r.Log, "failed to patch status for NifiDataflow "+instance.Name, err) } return Requeue() } @@ -449,7 +530,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request if instance.Status.State != v1.DataflowStateRan { instance.Status.State = v1.DataflowStateRan - if err := r.updateStatus(ctx, instance, current.Status); err != nil { + if err := r.patchStatus(ctx, instance, patchInstance, current.Status); err != nil { return RequeueWithError(r.Log, "failed to update status for NifiDataflow "+instance.Name, err) } r.Log.Info("Successfully ran dataflow", @@ -470,12 +551,12 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request } // Ensure NifiCluster label - if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance); err != nil { + if instance, err = r.ensureClusterLabel(ctx, clusterConnect, instance, patchInstance); err != nil { return RequeueWithError(r.Log, "failed to ensure NifiCluster label on dataflow "+instance.Name, err) } // Push any changes - if instance, err = r.updateAndFetchLatest(ctx, instance); err != nil { + if instance, err = r.updateAndFetchLatest(ctx, instance, patchInstance); err != nil { return RequeueWithError(r.Log, "failed to update NifiDataflow "+current.Name, err) } @@ -484,7 +565,7 @@ func (r *NifiDataflowReconciler) Reconcile(ctx context.Context, req ctrl.Request zap.String("flowId", 
instance.Spec.FlowId), zap.String("dataflow", instance.Name)) - r.Recorder.Event(instance, corev1.EventTypeWarning, "Reconciled", + r.Recorder.Event(instance, corev1.EventTypeNormal, "Reconciled", fmt.Sprintf("Success fully ensured dataflow %s based on flow {bucketId : %s, flowId: %s, version: %s}", instance.Name, instance.Spec.BucketId, instance.Spec.FlowId, strconv.FormatInt(int64(*instance.Spec.FlowVersion), 10))) @@ -509,18 +590,18 @@ func (r *NifiDataflowReconciler) SetupWithManager(mgr ctrl.Manager) error { } func (r *NifiDataflowReconciler) ensureClusterLabel(ctx context.Context, cluster clientconfig.ClusterConnect, - flow *v1.NifiDataflow) (*v1.NifiDataflow, error) { + flow *v1.NifiDataflow, patcher client.Patch) (*v1.NifiDataflow, error) { labels := ApplyClusterReferenceLabel(cluster, flow.GetLabels()) if !reflect.DeepEqual(labels, flow.GetLabels()) { flow.SetLabels(labels) - return r.updateAndFetchLatest(ctx, flow) + return r.updateAndFetchLatest(ctx, flow, patcher) } return flow, nil } func (r *NifiDataflowReconciler) updateAndFetchLatest(ctx context.Context, - flow *v1.NifiDataflow) (*v1.NifiDataflow, error) { + flow *v1.NifiDataflow, patcher client.Patch) (*v1.NifiDataflow, error) { typeMeta := flow.TypeMeta err := r.Client.Update(ctx, flow) @@ -532,7 +613,7 @@ func (r *NifiDataflowReconciler) updateAndFetchLatest(ctx context.Context, } func (r *NifiDataflowReconciler) checkFinalizers(ctx context.Context, flow *v1.NifiDataflow, - config *clientconfig.NifiConfig) (reconcile.Result, error) { + config *clientconfig.NifiConfig, patcher client.Patch) (reconcile.Result, error) { r.Log.Info("NiFi dataflow is marked for deletion", zap.String("dataflow", flow.Name)) var err error @@ -545,7 +626,7 @@ func (r *NifiDataflowReconciler) checkFinalizers(ctx context.Context, flow *v1.N return RequeueWithError(r.Log, "failed to finalize NiFiDataflow "+flow.Name, err) } } - if err = r.removeFinalizer(ctx, flow); err != nil { + if err = r.removeFinalizer(ctx, flow, 
patcher); err != nil { return RequeueWithError(r.Log, "failed to remove finalizer from dataflow "+flow.Name, err) } } @@ -553,11 +634,11 @@ func (r *NifiDataflowReconciler) checkFinalizers(ctx context.Context, flow *v1.N return Reconciled() } -func (r *NifiDataflowReconciler) removeFinalizer(ctx context.Context, flow *v1.NifiDataflow) error { +func (r *NifiDataflowReconciler) removeFinalizer(ctx context.Context, flow *v1.NifiDataflow, patcher client.Patch) error { r.Log.Info("Removing finalizer for NifiDataflow", zap.String("dataflow", flow.Name)) flow.SetFinalizers(util.StringSliceRemove(flow.GetFinalizers(), dataflowFinalizer)) - _, err := r.updateAndFetchLatest(ctx, flow) + _, err := r.updateAndFetchLatest(ctx, flow, patcher) return err } @@ -589,9 +670,9 @@ func (r *NifiDataflowReconciler) finalizeNifiDataflow(flow *v1.NifiDataflow, con return nil } -func (r *NifiDataflowReconciler) updateStatus(ctx context.Context, flow *v1.NifiDataflow, currentStatus v1.NifiDataflowStatus) error { +func (r *NifiDataflowReconciler) patchStatus(ctx context.Context, flow *v1.NifiDataflow, patchFlow client.Patch, currentStatus v1.NifiDataflowStatus) error { if !reflect.DeepEqual(flow.Status, currentStatus) { - return r.Client.Status().Update(ctx, flow) + return r.Client.Status().Patch(ctx, flow, patchFlow) } return nil } diff --git a/controllers/nifinodegroupautoscaler_controller.go b/controllers/nifinodegroupautoscaler_controller.go index e2dd61dea9..c0b1c41dea 100644 --- a/controllers/nifinodegroupautoscaler_controller.go +++ b/controllers/nifinodegroupautoscaler_controller.go @@ -44,7 +44,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var autoscalerFinalizer = "nifinodegroupautoscalers.nifi.konpyutaika.com/finalizer" +var autoscalerFinalizer string = fmt.Sprintf("nifinodegroupautoscalers.%s/finalizer", v1alpha1.GroupVersion.Group) // NifiNodeGroupAutoscalerReconciler reconciles a NifiNodeGroupAutoscaler object type NifiNodeGroupAutoscalerReconciler struct { 
diff --git a/controllers/nifiparametercontext_controller.go b/controllers/nifiparametercontext_controller.go index 5ca2a2f0df..d8233255a7 100644 --- a/controllers/nifiparametercontext_controller.go +++ b/controllers/nifiparametercontext_controller.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var parameterContextFinalizer = "nifiparametercontexts.nifi.konpyutaika.com/finalizer" +var parameterContextFinalizer = fmt.Sprintf("nifiparametercontexts.%s/finalizer", v1.GroupVersion.Group) // NifiParameterContextReconciler reconciles a NifiParameterContext object type NifiParameterContextReconciler struct { diff --git a/controllers/nifiregistryclient_controller.go b/controllers/nifiregistryclient_controller.go index 998efcbd5b..bbd05e9ad1 100644 --- a/controllers/nifiregistryclient_controller.go +++ b/controllers/nifiregistryclient_controller.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var registryClientFinalizer = "nifiregistryclients.nifi.konpyutaika.com/finalizer" +var registryClientFinalizer = fmt.Sprintf("nifiregistryclients.%s/finalizer", v1.GroupVersion.Group) // NifiRegistryClientReconciler reconciles a NifiRegistryClient object type NifiRegistryClientReconciler struct { diff --git a/controllers/nifiuser_controller.go b/controllers/nifiuser_controller.go index 089ea370ad..f620d20c31 100644 --- a/controllers/nifiuser_controller.go +++ b/controllers/nifiuser_controller.go @@ -44,7 +44,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var userFinalizer = "nifiusers.nifi.konpyutaika.com/finalizer" +var userFinalizer = fmt.Sprintf("nifiusers.%s/finalizer", v1.GroupVersion.Group) // NifiUserReconciler reconciles a NifiUser object type NifiUserReconciler struct { diff --git a/controllers/nifiusergroup_controller.go b/controllers/nifiusergroup_controller.go index a51b94770e..0e25712e43 100644 --- a/controllers/nifiusergroup_controller.go +++ b/controllers/nifiusergroup_controller.go @@ -42,7 
+42,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -var userGroupFinalizer = "nifiusergroups.nifi.konpyutaika.com/finalizer" +var userGroupFinalizer = fmt.Sprintf("nifiusergroups.%s/finalizer", v1.GroupVersion.Group) // NifiUserGroupReconciler reconciles a NifiUserGroup object type NifiUserGroupReconciler struct { diff --git a/go.mod b/go.mod index 62829c1fcd..a940863f2f 100644 --- a/go.mod +++ b/go.mod @@ -16,17 +16,32 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.27.4 github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible + github.com/spf13/cobra v1.6.1 github.com/stretchr/testify v1.8.2 go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20230321023759-10a507213a29 k8s.io/api v0.26.3 k8s.io/apimachinery v0.26.3 + k8s.io/cli-runtime v0.26.0 k8s.io/client-go v0.26.3 k8s.io/klog/v2 v2.90.1 sigs.k8s.io/controller-runtime v0.14.6 ) -require go.uber.org/goleak v1.2.1 // indirect +require ( + github.com/go-errors/errors v1.0.1 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/xlab/treeprint v1.1.0 // indirect + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + go.uber.org/goleak v1.2.1 // indirect + sigs.k8s.io/kustomize/api v0.12.1 // indirect + sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect +) require ( github.com/beorn7/perks v1.0.1 // indirect @@ -45,6 +60,7 @@ require ( github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect github.com/josharian/intern v1.0.0 // indirect 
github.com/json-iterator/go v1.1.12 // indirect @@ -52,15 +68,18 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect + github.com/olekukonko/tablewriter v0.0.5 + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.5 go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.10.0 // indirect golang.org/x/net v0.14.0 // indirect diff --git a/go.sum b/go.sum index 57b827a7c1..c5fe3266c5 100644 --- a/go.sum +++ b/go.sum @@ -23,10 +23,14 @@ github.com/cert-manager/cert-manager v1.11.0/go.mod h1:JCy2jvRi3Kp+qnRfw8TVYkOoc github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -56,6 +60,8 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -103,6 +109,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod 
h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -119,16 +127,22 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo= github.com/imdario/mergo 
v0.3.14/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jarcoal/httpmock v1.3.0 h1:2RJ8GP0IIaWwcC9Fp2BmVi8Kog3v2Hn7VXM3fTd+nuc= github.com/jarcoal/httpmock v1.3.0/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -141,8 +155,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konpyutaika/nigoapi v0.0.8 h1:eOp5Bj7v3PStEqmQrqETncyi5ZRg9k0ooEIZAp71GbU= -github.com/konpyutaika/nigoapi v0.0.8/go.mod h1:ylinFOIwDzQvM+BbiDFWmr6l2Lexs2pg/jb9znIVAv4= github.com/konpyutaika/nigoapi v0.0.9 h1:6BGGs07JuMgUUVeFNJrQIC/kWqPA4Zt/Jxf+rxdebHg= github.com/konpyutaika/nigoapi v0.0.9/go.mod h1:tI7IUufKquth2lnB8Lk4SDPkyaXbv1DlGy5NrjuY/VU= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -154,9 +166,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxatome/go-testdeep v1.12.0 h1:Ql7Go8Tg0C1D/uMMX59LAoYK7LffeJQ6X2T04nTH68g= @@ -168,6 +185,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -175,6 +194,8 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 
h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -191,6 +212,8 @@ github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible h1:Jd6xfriVlJ6hWPvYOE0Ni0QWcNTLRehfGPFxr3eSL80= github.com/pavel-v-chernykh/keystore-go v2.1.0+incompatible/go.mod h1:xlUlxe/2ItGlQyMTstqeDv9r3U4obH7xYd26TbDQutY= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -205,16 +228,24 @@ github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -228,9 +259,13 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= +github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -272,14 +307,10 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= -golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -293,6 +324,7 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -304,13 +336,9 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys 
v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -318,8 +346,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -373,8 +399,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod 
h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -409,6 +433,8 @@ k8s.io/apiextensions-apiserver v0.26.3/go.mod h1:jdA5MdjNWGP+njw1EKMZc64xAT5fIhN k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/cli-runtime v0.26.0 h1:aQHa1SyUhpqxAw1fY21x2z2OS5RLtMJOCj7tN4oq8mw= +k8s.io/cli-runtime v0.26.0/go.mod h1:o+4KmwHzO/UK0wepE1qpRk6l3o60/txUZ1fEXWGIKTY= k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= k8s.io/component-base v0.26.3 h1:oC0WMK/ggcbGDTkdcqefI4wIZRYdK3JySx9/HADpV0g= @@ -429,6 +455,10 @@ sigs.k8s.io/gateway-api v0.6.2 h1:583XHiX2M2bKEA0SAdkoxL1nY73W1+/M+IAm8LJvbEA= sigs.k8s.io/gateway-api v0.6.2/go.mod h1:EYJT+jlPWTeNskjV0JTki/03WX1cyAnBhwBJfYHpV/0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= +sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= +sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= +sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= diff --git a/helm/nifikop/crds/nifi.konpyutaika.com_nificonnections.yaml 
b/helm/nifikop/crds/nifi.konpyutaika.com_nificonnections.yaml new file mode 100644 index 0000000000..d49d338439 --- /dev/null +++ b/helm/nifikop/crds/nifi.konpyutaika.com_nificonnections.yaml @@ -0,0 +1,138 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: nificonnections.nifi.konpyutaika.com +spec: + group: nifi.konpyutaika.com + names: + kind: NifiConnection + listKind: NifiConnectionList + plural: nificonnections + singular: nificonnection + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + configuration: + properties: + backPressureDataSizeThreshold: + default: 1 GB + type: string + backPressureObjectThreshold: + default: 10000 + format: int64 + type: integer + bends: + items: + properties: + posX: + format: int64 + type: integer + posY: + format: int64 + type: integer + type: object + type: array + flowFileExpiration: + type: string + labelIndex: + format: int32 + type: integer + loadBalanceCompression: + default: DO_NOT_COMPRESS + enum: + - DO_NOT_COMPRESS + - COMPRESS_ATTRIBUTES_ONLY + - COMPRESS_ATTRIBUTES_AND_CONTENT + type: string + loadBalancePartitionAttribute: + type: string + loadBalanceStrategy: + default: DO_NOT_LOAD_BALANCE + enum: + - DO_NOT_LOAD_BALANCE + - PARTITION_BY_ATTRIBUTE + - ROUND_ROBIN + - SINGLE + type: string + prioritizers: + items: + enum: + - FirstInFirstOutPrioritizer + - NewestFlowFileFirstPrioritizer + - OldestFlowFileFirstPrioritizer + - PriorityAttributePrioritizer + type: string + type: array + type: object + destination: + properties: + name: + type: string + namespace: + type: string + subName: + type: string + type: + enum: + - dataflow + type: string + required: + - name + - type + type: object + source: + properties: + name: + type: string + 
namespace: + type: string + subName: + type: string + type: + enum: + - dataflow + type: string + required: + - name + - type + type: object + updateStrategy: + enum: + - drop + - drain + type: string + required: + - destination + - source + - updateStrategy + type: object + status: + properties: + connectionID: + type: string + state: + type: string + required: + - connectionID + - state + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/helm/nifikop/templates/role.yaml b/helm/nifikop/templates/role.yaml index 27ff3cff65..b08f49f7c3 100644 --- a/helm/nifikop/templates/role.yaml +++ b/helm/nifikop/templates/role.yaml @@ -87,6 +87,7 @@ rules: - "nifiregistryclients" - "nifiparametercontexts" - "nifinodegroupautoscalers" + - "nificonnections" verbs: - create - delete @@ -134,6 +135,7 @@ rules: - nifiregistryclients/status - nifiparametercontexts/status - nifinodegroupautoscalers/status + - nificonnections/status verbs: - get - update diff --git a/main.go b/main.go index 6e427ecb77..0eed182a86 100644 --- a/main.go +++ b/main.go @@ -199,6 +199,18 @@ func main() { os.Exit(1) } + if err = (&controllers.NifiConnectionReconciler{ + Client: mgr.GetClient(), + Log: *logger.Named("controllers").Named("NifiConnection"), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("nifi-connection"), + RequeueInterval: multipliers.RegistryClientRequeueInterval, + RequeueOffset: multipliers.RequeueOffset, + }).SetupWithManager(mgr); err != nil { + logger.Error("unable to create controller", zap.String("controller", "NifiConnection"), zap.Error(err)) + os.Exit(1) + } + if webhookEnabled { if err = (&v1alpha1.NifiUser{}).SetupWebhookWithManager(mgr); err != nil { logger.Error("unable to create webhook", zap.String("webhook", "NifiUser"), zap.Error(err)) diff --git a/pkg/clientwrappers/connection/connection.go b/pkg/clientwrappers/connection/connection.go new file mode 100644 index 0000000000..a7217bfb04 --- /dev/null +++ 
b/pkg/clientwrappers/connection/connection.go @@ -0,0 +1,302 @@ +package connection + +import ( + "github.com/konpyutaika/nifikop/api/v1alpha1" + "github.com/konpyutaika/nifikop/pkg/errorfactory" + "github.com/konpyutaika/nifikop/pkg/nificlient" + "github.com/konpyutaika/nifikop/pkg/util" + "github.com/konpyutaika/nifikop/pkg/util/clientconfig" + + "github.com/konpyutaika/nifikop/pkg/clientwrappers" + "github.com/konpyutaika/nifikop/pkg/common" + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" +) + +var log = common.CustomLogger().Named("connection-method") + +// CreateConnection will deploy the NifiConnection on NiFi Cluster. +func CreateConnection(connection *v1alpha1.NifiConnection, source *v1alpha1.ComponentInformation, destination *v1alpha1.ComponentInformation, + config *clientconfig.NifiConfig) (*v1alpha1.NifiConnectionStatus, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + var bends []nigoapi.PositionDto + for _, bend := range connection.Spec.Configuration.GetBends() { + bends = append(bends, nigoapi.PositionDto{ + X: float64(*bend.X), + Y: float64(*bend.Y), + }) + } + + var defaultVersion int64 = 0 + connectionEntity := nigoapi.ConnectionEntity{ + Revision: &nigoapi.RevisionDto{ + Version: &defaultVersion, + }, + Component: &nigoapi.ConnectionDto{ + Name: connection.Name, + ParentGroupId: source.ParentGroupId, + Source: &nigoapi.ConnectableDto{ + Id: source.Id, + Type_: source.Type, + GroupId: source.GroupId, + }, + Destination: &nigoapi.ConnectableDto{ + Id: destination.Id, + Type_: destination.Type, + GroupId: destination.GroupId, + }, + FlowFileExpiration: connection.Spec.Configuration.GetFlowFileExpiration(), + BackPressureDataSizeThreshold: connection.Spec.Configuration.BackPressureDataSizeThreshold, + BackPressureObjectThreshold: connection.Spec.Configuration.BackPressureObjectThreshold, + LoadBalanceStrategy: string(connection.Spec.Configuration.LoadBalanceStrategy),
LoadBalancePartitionAttribute: connection.Spec.Configuration.GetLoadBalancePartitionAttribute(), + LoadBalanceCompression: string(connection.Spec.Configuration.LoadBalanceCompression), + Prioritizers: connection.Spec.Configuration.GetStringPrioritizers(), + LabelIndex: connection.Spec.Configuration.GetLabelIndex(), + Bends: bends, + }, + } + + entity, err := nClient.CreateConnection(connectionEntity) + if err := clientwrappers.ErrorCreateOperation(log, err, "Create connection"); err != nil { + return nil, err + } + + return &v1alpha1.NifiConnectionStatus{ConnectionId: entity.Id}, nil +} + +// GetConnectionInformation retrieves the connection information. +func GetConnectionInformation(connection *v1alpha1.NifiConnection, config *clientconfig.NifiConfig) (*nigoapi.ConnectionEntity, error) { + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + connectionEntity, err := nClient.GetConnection(connection.Status.ConnectionId) + if err := clientwrappers.ErrorGetOperation(log, err, "Get connection"); err != nil { + if err == nificlient.ErrNifiClusterReturned404 { + return nil, nil + } + return nil, err + } + + return connectionEntity, nil +} + +// ConnectionExist checks whether the NifiConnection exists on the NiFi Cluster. +func ConnectionExist(connection *v1alpha1.NifiConnection, config *clientconfig.NifiConfig) (bool, error) { + if connection.Status.ConnectionId == "" { + return false, nil + } + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return false, err + } + + connectionEntity, err := nClient.GetConnection(connection.Status.ConnectionId) + if err := clientwrappers.ErrorGetOperation(log, err, "Get connection"); err != nil { + if err == nificlient.ErrNifiClusterReturned404 { + return false, nil + } + return false, err + } + + return connectionEntity != nil, nil +} + +// SyncConnectionConfig implements the logic to sync a NifiConnection config with the deployed connection config.
+func SyncConnectionConfig(connection *v1alpha1.NifiConnection, + source *v1alpha1.ComponentInformation, destination *v1alpha1.ComponentInformation, + config *clientconfig.NifiConfig) (*v1alpha1.NifiConnectionStatus, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + connectionEntity, err := nClient.GetConnection(connection.Status.ConnectionId) + if err := clientwrappers.ErrorGetOperation(log, err, "Get connection"); err != nil { + return nil, err + } + + if isSourceChanged(connectionEntity, source) { + return &connection.Status, errorfactory.NifiConnectionDeleting{} + } + + if isConfigurationChanged(connectionEntity, connection) { + connectionEntity.Component.Name = connection.Name + + var bends []nigoapi.PositionDto + for _, bend := range connection.Spec.Configuration.GetBends() { + bends = append(bends, nigoapi.PositionDto{ + X: float64(*bend.X), + Y: float64(*bend.Y), + }) + } + + connectionEntity.Component.FlowFileExpiration = connection.Spec.Configuration.GetFlowFileExpiration() + connectionEntity.Component.BackPressureDataSizeThreshold = connection.Spec.Configuration.BackPressureDataSizeThreshold + connectionEntity.Component.BackPressureObjectThreshold = connection.Spec.Configuration.BackPressureObjectThreshold + connectionEntity.Component.LoadBalanceStrategy = string(connection.Spec.Configuration.LoadBalanceStrategy) + connectionEntity.Component.LoadBalancePartitionAttribute = connection.Spec.Configuration.GetLoadBalancePartitionAttribute() + connectionEntity.Component.LoadBalanceCompression = string(connection.Spec.Configuration.LoadBalanceCompression) + connectionEntity.Component.Prioritizers = connection.Spec.Configuration.GetStringPrioritizers() + connectionEntity.Component.LabelIndex = connection.Spec.Configuration.GetLabelIndex() + connectionEntity.Component.Bends = bends + + _, err := nClient.UpdateConnection(*connectionEntity) + if err := clientwrappers.ErrorUpdateOperation(log, err,
"Update connection"); err != nil { + return nil, err + } + return &connection.Status, errorfactory.NifiConnectionSyncing{} + } + + if isDestinationChanged(connectionEntity, destination) { + _, err := SyncConnectionDestination(connection, destination, config) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Update connection"); err != nil { + return nil, err + } + return &connection.Status, errorfactory.NifiConnectionSyncing{} + } + + return &connection.Status, nil +} + +// IsOutOfSyncConnection controls whether the deployed connection is out of sync with the NifiConnection resource. +func IsOutOfSyncConnection(connection *v1alpha1.NifiConnection, + source *v1alpha1.ComponentInformation, destination *v1alpha1.ComponentInformation, + config *clientconfig.NifiConfig) (bool, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return false, err + } + + connectionEntity, err := nClient.GetConnection(connection.Status.ConnectionId) + if err := clientwrappers.ErrorGetOperation(log, err, "Get connection"); err != nil { + return false, err + } + + return isConfigurationChanged(connectionEntity, connection) || isSourceChanged(connectionEntity, source) || + isDestinationChanged(connectionEntity, destination), nil +} + +// isConfigurationChanged controls whether the deployed connection configuration is out of sync.
+func isConfigurationChanged(connectionEntity *nigoapi.ConnectionEntity, connection *v1alpha1.NifiConnection) bool { + var bends []nigoapi.PositionDto + for _, bend := range connection.Spec.Configuration.GetBends() { + bends = append(bends, nigoapi.PositionDto{ + X: float64(*bend.X), + Y: float64(*bend.Y), + }) + } + + return connectionEntity.Component.FlowFileExpiration != connection.Spec.Configuration.GetFlowFileExpiration() || + connectionEntity.Component.BackPressureDataSizeThreshold != connection.Spec.Configuration.BackPressureDataSizeThreshold || + connectionEntity.Component.BackPressureObjectThreshold != connection.Spec.Configuration.BackPressureObjectThreshold || + connectionEntity.Component.LoadBalanceStrategy != string(connection.Spec.Configuration.LoadBalanceStrategy) || + connectionEntity.Component.LoadBalancePartitionAttribute != connection.Spec.Configuration.GetLoadBalancePartitionAttribute() || + connectionEntity.Component.LoadBalanceCompression != string(connection.Spec.Configuration.LoadBalanceCompression) || + !util.StringSliceStrictCompare(connectionEntity.Component.Prioritizers, connection.Spec.Configuration.GetStringPrioritizers()) || + connectionEntity.Component.LabelIndex != connection.Spec.Configuration.GetLabelIndex() || + isBendChanged(connectionEntity.Component.Bends, bends) +} + +// isBendChanged controls whether the deployed connection bends are out of sync. +func isBendChanged(current []nigoapi.PositionDto, original []nigoapi.PositionDto) bool { + if len(current) != len(original) { + return true + } + + for i, posC := range current { + if posC.X != original[i].X || posC.Y != original[i].Y { + return true + } + } + + return false +} + +// isSourceChanged controls whether the deployed connection source is out of sync.
+func isSourceChanged( + connectionEntity *nigoapi.ConnectionEntity, + source *v1alpha1.ComponentInformation) bool { + + return connectionEntity.Component.Source.Id != source.Id || connectionEntity.Component.Source.GroupId != source.GroupId || + connectionEntity.Component.Source.Type_ != source.Type +} + +// isDestinationChanged control if the deployed connection destination is out of sync. +func isDestinationChanged( + connectionEntity *nigoapi.ConnectionEntity, + destination *v1alpha1.ComponentInformation) bool { + + return connectionEntity.Component.Destination.Id != destination.Id || connectionEntity.Component.Destination.GroupId != destination.GroupId || + connectionEntity.Component.Destination.Type_ != destination.Type +} + +// SyncConnectionDestination implements the logic to sync a NifiConnection with the deployed connection destination. +func SyncConnectionDestination(connection *v1alpha1.NifiConnection, destination *v1alpha1.ComponentInformation, + config *clientconfig.NifiConfig) (*v1alpha1.NifiConnectionStatus, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + connectionEntity, err := nClient.GetConnection(connection.Status.ConnectionId) + if err := clientwrappers.ErrorGetOperation(log, err, "Get connection"); err != nil { + return nil, err + } + + connectionEntity.Component.Destination.Id = destination.Id + connectionEntity.Component.Destination.Type_ = destination.Type + connectionEntity.Component.Destination.GroupId = destination.GroupId + + _, err = nClient.UpdateConnection(*connectionEntity) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Update connection"); err != nil { + return nil, err + } + return &connection.Status, nil +} + +// DeleteConnection implements the logic to delete a connection. 
+func DeleteConnection(connection *v1alpha1.NifiConnection, config *clientconfig.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return err + } + + connectionEntity, err := nClient.GetConnection(connection.Status.ConnectionId) + if err := clientwrappers.ErrorGetOperation(log, err, "Get connection"); err != nil { + return err + } + + err = nClient.DeleteConnection(*connectionEntity) + if err := clientwrappers.ErrorCreateOperation(log, err, "Remove connection"); err != nil { + return err + } + + return nil +} + +// DropConnectionFlowFiles implements the logic to drop the flowfiles from a connection. +func DropConnectionFlowFiles(connection *v1alpha1.NifiConnection, + config *clientconfig.NifiConfig) error { + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return err + } + + _, err = nClient.CreateDropRequest(connection.Status.ConnectionId) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Create drop-request"); err != nil { + return err + } + + return nil +} diff --git a/pkg/clientwrappers/dataflow/dataflow.go b/pkg/clientwrappers/dataflow/dataflow.go index 78b86a1c00..c1eeec949a 100644 --- a/pkg/clientwrappers/dataflow/dataflow.go +++ b/pkg/clientwrappers/dataflow/dataflow.go @@ -56,6 +56,27 @@ func RootProcessGroup(config *clientconfig.NifiConfig) (string, error) { return rootPg.ProcessGroupFlow.Id, nil } +func GetDataflowInformation(flow *v1.NifiDataflow, config *clientconfig.NifiConfig) (*nigoapi.ProcessGroupFlowEntity, error) { + if flow.Status.ProcessGroupID == "" { + return nil, nil + } + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + flowEntity, err := nClient.GetFlow(flow.Status.ProcessGroupID) + if err := clientwrappers.ErrorGetOperation(log, err, "Get flow"); err != nil { + if err == nificlient.ErrNifiClusterReturned404 { + return nil, nil + } + return nil, err + } + + return flowEntity, nil +} + // 
CreateDataflow will deploy the NifiDataflow on NiFi Cluster func CreateDataflow(flow *v1.NifiDataflow, config *clientconfig.NifiConfig, registry *v1.NifiRegistryClient) (*v1.NifiDataflowStatus, error) { @@ -518,12 +539,12 @@ func prepareUpdatePG(flow *v1.NifiDataflow, config *clientconfig.NifiConfig) (*v for _, connection := range connections { if connection.Status.AggregateSnapshot.FlowFilesQueued != 0 { dropRequest, err := nClient.CreateDropRequest(connection.Id) - if err := clientwrappers.ErrorCreateOperation(log, err, "Create drop-request"); err != nil { + if err := clientwrappers.ErrorUpdateOperation(log, err, "Create drop-request"); err != nil { return nil, err } flow.Status.LatestDropRequest = - dropRequest2Status(flow.Status.LatestDropRequest.ConnectionId, dropRequest) + dropRequest2Status(connection.Id, dropRequest) return &flow.Status, errorfactory.NifiConnectionDropping{} } @@ -548,7 +569,7 @@ func prepareUpdatePG(flow *v1.NifiDataflow, config *clientconfig.NifiConfig) (*v return nil, err } - // list input port + // Unlist all processors with input connections for _, connection := range connections { processors = removeProcessor(processors, connection.DestinationId) } @@ -687,6 +708,53 @@ func processGroupFromFlow( return nil } +// // inputConnectionFromFlow retrieve all input connection from a list of input ports +// func inputConnectionFromFlow(flowEntity *nigoapi.ProcessGroupFlowEntity, +// inputPorts []nigoapi.PortEntity) []nigoapi.ConnectionEntity { +// var connections []nigoapi.ConnectionEntity + +// for _, connection := range flowEntity.ProcessGroupFlow.Flow.Connections { +// for _, inputPort := range inputPorts { +// if connection.DestinationId == inputPort.Id { +// connections = append(connections, connection) +// } +// } +// } + +// return connections +// } + +// // hasInputConnectionsActive will determine if a flow has input connections that are still active +// func hasInputConnectionsActive(flow *v1.NifiDataflow, config 
*clientconfig.NifiConfig) (*bool, error) { +// var connections []nigoapi.ConnectionEntity + +// nClient, err := common.NewClusterConnection(log, config) +// if err != nil { +// return nil, err +// } + +// flowEntity, err := nClient.GetFlow(flow.Status.ProcessGroupID) +// if err != nil { +// return nil, err +// } + +// parentFlowEntity, err := nClient.GetFlow(flowEntity.ProcessGroupFlow.ParentGroupId) +// if err != nil { +// return nil, err +// } + +// connections = inputConnectionFromFlow(parentFlowEntity, flowEntity.ProcessGroupFlow.Flow.InputPorts) + +// var hasConnectionActive bool = false +// for _, inputConnection := range connections { +// if inputConnection.Status.AggregateSnapshot.FlowFilesQueued > 0 || inputConnection.Component.Source.Running { +// hasConnectionActive = true +// } +// } + +// return &hasConnectionActive, nil +// } + // listComponents will get all ProcessGroups, Processors, Connections and Ports recursively func listComponents(config *clientconfig.NifiConfig, processGroupID string) ([]nigoapi.ProcessGroupEntity, []nigoapi.ProcessorEntity, []nigoapi.ConnectionEntity, []nigoapi.PortEntity, error) { @@ -701,7 +769,10 @@ func listComponents(config *clientconfig.NifiConfig, return processGroups, processors, connections, inputPorts, err } - flowEntity, _ := nClient.GetFlow(processGroupID) + flowEntity, err := nClient.GetFlow(processGroupID) + if err != nil { + return processGroups, processors, connections, inputPorts, err + } flow := flowEntity.ProcessGroupFlow.Flow processGroups = flow.ProcessGroups @@ -840,3 +911,23 @@ func removeProcessor(processors []nigoapi.ProcessorEntity, toRemoveId string) [] return tmp } + +// Check if a dataflow contains flowfile +func IsDataflowEmpty(flow *v1.NifiDataflow, config *clientconfig.NifiConfig) (bool, error) { + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return false, err + } + + flowEntity, err := nClient.GetFlow(flow.Spec.GetParentProcessGroupID(config.RootProcessGroupId)) 
+ if err := clientwrappers.ErrorGetOperation(log, err, "Get flow"); err != nil { + return false, err + } + + pgEntity := processGroupFromFlow(flowEntity, flow) + if pgEntity == nil { + return false, errorfactory.NifiFlowDraining{} + } + + return pgEntity.Status.AggregateSnapshot.FlowFilesQueued == 0, nil +} diff --git a/pkg/clientwrappers/inputport/inputport.go b/pkg/clientwrappers/inputport/inputport.go new file mode 100644 index 0000000000..4eefd81bca --- /dev/null +++ b/pkg/clientwrappers/inputport/inputport.go @@ -0,0 +1,49 @@ +package inputport + +import ( + "github.com/konpyutaika/nifikop/pkg/util/clientconfig" + + "github.com/konpyutaika/nifikop/pkg/clientwrappers" + "github.com/konpyutaika/nifikop/pkg/common" + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" +) + +var log = common.CustomLogger().Named("inputport-method") + +// StopPort will the port on NiFi +func StopPort(port nigoapi.PortEntity, config *clientconfig.NifiConfig) (*nigoapi.ProcessorStatusDto, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + entity, err := nClient.UpdateInputPortRunStatus(port.Id, nigoapi.PortRunStatusEntity{ + Revision: port.Revision, + State: "STOPPED", + }) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Stop input port"); err != nil { + return nil, err + } + + return entity.Status, nil +} + +// StartPort will the port on NiFi +func StartPort(port nigoapi.PortEntity, config *clientconfig.NifiConfig) (*nigoapi.ProcessorStatusDto, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + entity, err := nClient.UpdateInputPortRunStatus(port.Id, nigoapi.PortRunStatusEntity{ + Revision: port.Revision, + State: "RUNNING", + }) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Start input port"); err != nil { + return nil, err + } + + return entity.Status, nil +} diff --git a/pkg/clientwrappers/outputport/outputport.go 
b/pkg/clientwrappers/outputport/outputport.go new file mode 100644 index 0000000000..191eea22e3 --- /dev/null +++ b/pkg/clientwrappers/outputport/outputport.go @@ -0,0 +1,49 @@ +package outputport + +import ( + "github.com/konpyutaika/nifikop/pkg/util/clientconfig" + + "github.com/konpyutaika/nifikop/pkg/clientwrappers" + "github.com/konpyutaika/nifikop/pkg/common" + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" +) + +var log = common.CustomLogger().Named("outputport-method") + +// StopPort will the port on NiFi +func StopPort(port nigoapi.PortEntity, config *clientconfig.NifiConfig) (*nigoapi.ProcessorStatusDto, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + entity, err := nClient.UpdateOutputPortRunStatus(port.Id, nigoapi.PortRunStatusEntity{ + Revision: port.Revision, + State: "STOPPED", + }) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Stop output port"); err != nil { + return nil, err + } + + return entity.Status, nil +} + +// StartPort will the port on NiFi +func StartPort(port nigoapi.PortEntity, config *clientconfig.NifiConfig) (*nigoapi.ProcessorStatusDto, error) { + + nClient, err := common.NewClusterConnection(log, config) + if err != nil { + return nil, err + } + + entity, err := nClient.UpdateOutputPortRunStatus(port.Id, nigoapi.PortRunStatusEntity{ + Revision: port.Revision, + State: "RUNNING", + }) + if err := clientwrappers.ErrorUpdateOperation(log, err, "Start output port"); err != nil { + return nil, err + } + + return entity.Status, nil +} diff --git a/pkg/errorfactory/errorfactory.go b/pkg/errorfactory/errorfactory.go index 867b175727..3ee66ffa2e 100644 --- a/pkg/errorfactory/errorfactory.go +++ b/pkg/errorfactory/errorfactory.go @@ -77,7 +77,7 @@ type NifiFlowUpdateRequestNotFound struct{ error } // NifiFlowControllerServiceScheduling states that the flow's controller service are still scheduling type NifiFlowControllerServiceScheduling struct{ error } -// 
NifiFlowSyncing states that the flow's controller service are still scheduling +// NifiFlowSyncing states that the flow is still syncing type NifiFlowSyncing struct{ error } // NifiFlowScheduling states that the flow is still scheduling @@ -89,6 +89,12 @@ type NifiReportingTasksValidating struct{ error } // NifiReportingTasksInvalid states that the reporting task is invalid type NifiReportingTasksInvalid struct{ error } +// NifiConnectionSyncing states that the connection is still syncing +type NifiConnectionSyncing struct{ error } + +// NifiConnectionDeleting states that the connection is still deleting +type NifiConnectionDeleting struct{ error } + // New creates a new error factory error func New(t interface{}, err error, msg string, wrapArgs ...interface{}) error { wrapped := errors.WrapIfWithDetails(err, msg, wrapArgs...) diff --git a/pkg/k8sutil/annotator.go b/pkg/k8sutil/annotator.go new file mode 100644 index 0000000000..0a52bb667d --- /dev/null +++ b/pkg/k8sutil/annotator.go @@ -0,0 +1,118 @@ +package k8sutil + +import ( + "archive/zip" + "bytes" + "encoding/base64" + "io" + "net/http" + + meta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" +) + +var metadataAccessor = meta.NewAccessor() + +// SetAnnotation set an annotation +func SetAnnotation(annotationKey string, obj runtime.Object, value []byte) error { + if len(value) < 1 { + return nil + } + + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return err + } + + if annots == nil { + annots = map[string]string{} + } + + annots[annotationKey], err = zipAndBase64EncodeAnnotation(value) + if err != nil { + return err + } + return metadataAccessor.SetAnnotations(obj, annots) +} + +// GetAnnotation get a annotation +func GetAnnotation(annotationKey string, obj runtime.Object) ([]byte, error) { + annots, err := metadataAccessor.Annotations(obj) + if err != nil { + return nil, err + } + + if annots == nil { + return nil, nil + } + + original, ok := 
annots[annotationKey] + if !ok { + return nil, nil + } + + // Try to base64 decode, and fallback to non-base64 encoded content for backwards compatibility. + if decoded, err := base64.StdEncoding.DecodeString(original); err == nil { + if http.DetectContentType(decoded) == "application/zip" { + return unZipAnnotation(decoded) + } + return decoded, nil + } + + return []byte(original), nil +} + +func zipAndBase64EncodeAnnotation(original []byte) (string, error) { + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new zip archive. + w := zip.NewWriter(buf) + + f, err := w.Create("original") + if err != nil { + return "", err + } + _, err = f.Write(original) + if err != nil { + return "", err + } + + // Make sure to check the error on Close. + err = w.Close() + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(buf.Bytes()), nil +} + +func unZipAnnotation(original []byte) ([]byte, error) { + annotation, err := io.ReadAll(bytes.NewReader(original)) + if err != nil { + return nil, err + } + + zipReader, err := zip.NewReader(bytes.NewReader(annotation), int64(len(annotation))) + if err != nil { + return nil, err + } + + // Read the file from zip archive + zipFile := zipReader.File[0] + unzippedFileBytes, err := readZipFile(zipFile) + if err != nil { + return nil, err + } + + return unzippedFileBytes, nil +} + +func readZipFile(zf *zip.File) ([]byte, error) { + f, err := zf.Open() + if err != nil { + return nil, err + } + defer f.Close() + return io.ReadAll(f) +} diff --git a/pkg/k8sutil/lookup.go b/pkg/k8sutil/lookup.go index c396c606e2..0ab3f23091 100644 --- a/pkg/k8sutil/lookup.go +++ b/pkg/k8sutil/lookup.go @@ -2,7 +2,8 @@ package k8sutil import ( "context" - "github.com/konpyutaika/nifikop/api/v1" + + v1 "github.com/konpyutaika/nifikop/api/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" @@ -43,3 +44,10 @@ func LookupNifiUser(client runtimeClient.Client, userName, userNamespace string) 
err = client.Get(context.TODO(), types.NamespacedName{Name: userName, Namespace: userNamespace}, user) return } + +// LookupNifiDataflow returns the dataflow instance based on its name and namespace +func LookupNifiDataflow(client runtimeClient.Client, dataflowName, dataflowNamespace string) (dataflow *v1.NifiDataflow, err error) { + dataflow = &v1.NifiDataflow{} + err = client.Get(context.TODO(), types.NamespacedName{Name: dataflowName, Namespace: dataflowNamespace}, dataflow) + return +} diff --git a/pkg/nificlient/client.go b/pkg/nificlient/client.go index 2ae531a8e8..30993d9685 100644 --- a/pkg/nificlient/client.go +++ b/pkg/nificlient/client.go @@ -72,6 +72,7 @@ type NifiClient interface { CreateProcessGroup(entity nigoapi.ProcessGroupEntity, pgParentId string) (*nigoapi.ProcessGroupEntity, error) UpdateProcessGroup(entity nigoapi.ProcessGroupEntity) (*nigoapi.ProcessGroupEntity, error) RemoveProcessGroup(entity nigoapi.ProcessGroupEntity) error + CreateConnection(entity nigoapi.ConnectionEntity) (*nigoapi.ConnectionEntity, error) // Version func CreateVersionUpdateRequest(pgId string, entity nigoapi.VersionControlInformationEntity) (*nigoapi.VersionedFlowUpdateRequestEntity, error) @@ -86,9 +87,15 @@ type NifiClient interface { // Processor func UpdateProcessor(entity nigoapi.ProcessorEntity) (*nigoapi.ProcessorEntity, error) UpdateProcessorRunStatus(id string, entity nigoapi.ProcessorRunStatusEntity) (*nigoapi.ProcessorEntity, error) + GetProcessor(id string) (*nigoapi.ProcessorEntity, error) // Input port func UpdateInputPortRunStatus(id string, entity nigoapi.PortRunStatusEntity) (*nigoapi.ProcessorEntity, error) + GetInputPort(id string) (*nigoapi.PortEntity, error) + + // Output port func + UpdateOutputPortRunStatus(id string, entity nigoapi.PortRunStatusEntity) (*nigoapi.ProcessorEntity, error) + GetOutputPort(id string) (*nigoapi.PortEntity, error) // Parameter context func GetParameterContexts() ([]nigoapi.ParameterContextEntity, error) @@ -129,6 
+136,11 @@ type NifiClient interface { GetControllerConfig() (*nigoapi.ControllerConfigurationEntity, error) UpdateControllerConfig(entity nigoapi.ControllerConfigurationEntity) (*nigoapi.ControllerConfigurationEntity, error) + // Connections func + GetConnection(id string) (*nigoapi.ConnectionEntity, error) + UpdateConnection(entity nigoapi.ConnectionEntity) (*nigoapi.ConnectionEntity, error) + DeleteConnection(entity nigoapi.ConnectionEntity) error + Build() error } diff --git a/pkg/nificlient/connection.go b/pkg/nificlient/connection.go new file mode 100644 index 0000000000..3edc973728 --- /dev/null +++ b/pkg/nificlient/connection.go @@ -0,0 +1,62 @@ +package nificlient + +import ( + "strconv" + + "github.com/antihax/optional" + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" + "go.uber.org/zap" +) + +func (n *nifiClient) GetConnection(id string) (*nigoapi.ConnectionEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. + client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to get the connection informations + connectionEntity, rsp, body, err := client.ConnectionsApi.GetConnection(context, id) + if err := errorGetOperation(rsp, body, err, n.log); err != nil { + return nil, err + } + + return &connectionEntity, nil +} + +func (n *nifiClient) UpdateConnection(entity nigoapi.ConnectionEntity) (*nigoapi.ConnectionEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. 
+ client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to update the connection informations + connectionEntity, rsp, body, err := client.ConnectionsApi.UpdateConnection(context, entity.Id, entity) + if err := errorUpdateOperation(rsp, body, err, n.log); err != nil { + return nil, err + } + + return &connectionEntity, nil +} + +func (n *nifiClient) DeleteConnection(entity nigoapi.ConnectionEntity) error { + // Get nigoapi client, favoring the one associated to the coordinator node. + client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to delete the connection + _, rsp, body, err := client.ConnectionsApi.DeleteConnection( + context, + entity.Id, + &nigoapi.ConnectionsApiDeleteConnectionOpts{ + Version: optional.NewString(strconv.FormatInt(*entity.Revision.Version, 10)), + }) + + return errorDeleteOperation(rsp, body, err, n.log) +} diff --git a/pkg/nificlient/connection_test.go b/pkg/nificlient/connection_test.go new file mode 100644 index 0000000000..865d173730 --- /dev/null +++ b/pkg/nificlient/connection_test.go @@ -0,0 +1,182 @@ +package nificlient + +import ( + "fmt" + "net/http" + "testing" + + "github.com/jarcoal/httpmock" + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" + "github.com/stretchr/testify/assert" +) + +func TestGetConnection(t *testing.T) { + assert := assert.New(t) + + id := "5f1f9f7e-0183-1000-ffff-ffffa1b9c8d5" + mockEntity := MockConnection(id, "test-unit", "5a859dfd-0183-1000-b22b-680e3b6fb507", + "41481c3b-a836-37fa-84d1-06e57a6dc2d8", "OUTPUT_PORT", "5eee3064-0183-1000-0000-00004b62d089", + "b760a6ed-1421-37d6-813d-94cf7cb03524", "INPUT_PORT", 
"5eee26c7-0183-1000-ffff-fffffc99fdef", + "1 hour", "10 GB", "DO_NOT_LOAD_BALANCE", "", "DO_NOT_COMPRESS", + 1000, []string{}, 0, []nigoapi.PositionDto{{X: 0, Y: 0}}) + + entity, err := testGetConnection(t, id, &mockEntity, 200) + assert.Nil(err) + assert.NotNil(entity) + + entity, err = testGetConnection(t, id, &mockEntity, 404) + assert.IsType(ErrNifiClusterReturned404, err) + assert.Nil(entity) + + entity, err = testGetConnection(t, id, &mockEntity, 500) + assert.IsType(ErrNifiClusterNotReturned200, err) + assert.Nil(entity) +} + +func testGetConnection(t *testing.T, id string, entity *nigoapi.ConnectionEntity, status int) (*nigoapi.ConnectionEntity, error) { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return nil, err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/connections/%s", id)) + httpmock.RegisterResponder(http.MethodGet, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + return client.GetConnection(id) +} + +func TestUpdateConnection(t *testing.T) { + assert := assert.New(t) + + mockEntity := MockConnection("5f1f9f7e-0183-1000-ffff-ffffa1b9c8d5", "test-unit", "5a859dfd-0183-1000-b22b-680e3b6fb507", + "41481c3b-a836-37fa-84d1-06e57a6dc2d8", "OUTPUT_PORT", "5eee3064-0183-1000-0000-00004b62d089", + "b760a6ed-1421-37d6-813d-94cf7cb03524", "INPUT_PORT", "5eee26c7-0183-1000-ffff-fffffc99fdef", + "1 hour", "10 GB", "DO_NOT_LOAD_BALANCE", "", "DO_NOT_COMPRESS", + 1000, []string{}, 0, []nigoapi.PositionDto{{X: 0, Y: 0}}) + + entity, err := testUpdateConnection(t, &mockEntity, 200) + assert.Nil(err) + assert.NotNil(entity) + + entity, err = testUpdateConnection(t, &mockEntity, 404) + assert.IsType(ErrNifiClusterReturned404, err) + assert.Nil(entity) + + entity, err = testUpdateConnection(t, &mockEntity, 500) + assert.IsType(ErrNifiClusterNotReturned200, err) + 
assert.Nil(entity) +} + +func testUpdateConnection(t *testing.T, entity *nigoapi.ConnectionEntity, status int) (*nigoapi.ConnectionEntity, error) { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return nil, err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/connections/%s", entity.Id)) + httpmock.RegisterResponder(http.MethodPut, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + return client.UpdateConnection(*entity) +} + +func TestDeleteConnection(t *testing.T) { + assert := assert.New(t) + + mockEntity := MockConnection("5f1f9f7e-0183-1000-ffff-ffffa1b9c8d5", "test-unit", "5a859dfd-0183-1000-b22b-680e3b6fb507", + "41481c3b-a836-37fa-84d1-06e57a6dc2d8", "OUTPUT_PORT", "5eee3064-0183-1000-0000-00004b62d089", + "b760a6ed-1421-37d6-813d-94cf7cb03524", "INPUT_PORT", "5eee26c7-0183-1000-ffff-fffffc99fdef", + "1 hour", "10 GB", "DO_NOT_LOAD_BALANCE", "", "DO_NOT_COMPRESS", + 1000, []string{}, 0, []nigoapi.PositionDto{{X: 0, Y: 0}}) + + err := testDeleteConnection(t, &mockEntity, 200) + assert.Nil(err) + + err = testDeleteConnection(t, &mockEntity, 404) + assert.Nil(err) + + err = testDeleteConnection(t, &mockEntity, 500) + assert.IsType(ErrNifiClusterNotReturned200, err) +} + +func testDeleteConnection(t *testing.T, entity *nigoapi.ConnectionEntity, status int) error { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/connections/%s", entity.Id)) + httpmock.RegisterResponder(http.MethodDelete, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + return client.DeleteConnection(*entity) +} + +func MockConnection( + id, name, 
parentId, srcId, srcType, srcGroupId, dstId, dstType, dstGroupId, + flowfileExpiration, backPressureDataSizeThreshold, loadBalanceStrategy, loadBalancePartitionAttribute, loadBalanceCompression string, + backPressureObjectThreshold int64, + prioritizers []string, + labelIndex int32, bends []nigoapi.PositionDto) nigoapi.ConnectionEntity { + + var version int64 = 10 + return nigoapi.ConnectionEntity{ + Id: id, + Component: &nigoapi.ConnectionDto{ + Name: name, + Id: id, + ParentGroupId: parentId, + Source: &nigoapi.ConnectableDto{ + Id: srcId, + Type_: srcType, + GroupId: srcGroupId, + }, + Destination: &nigoapi.ConnectableDto{ + Id: dstId, + Type_: dstType, + GroupId: dstGroupId, + }, + FlowFileExpiration: flowfileExpiration, + BackPressureDataSizeThreshold: backPressureDataSizeThreshold, + BackPressureObjectThreshold: backPressureObjectThreshold, + LoadBalanceStrategy: loadBalanceStrategy, + LoadBalancePartitionAttribute: loadBalancePartitionAttribute, + LoadBalanceCompression: loadBalanceCompression, + Prioritizers: prioritizers, + LabelIndex: labelIndex, + Bends: bends, + }, + Revision: &nigoapi.RevisionDto{Version: &version}, + } +} diff --git a/pkg/nificlient/flowfiles.go b/pkg/nificlient/flowfiles.go index 5e7fcdcb27..0a9dbc74c3 100644 --- a/pkg/nificlient/flowfiles.go +++ b/pkg/nificlient/flowfiles.go @@ -32,7 +32,7 @@ func (n *nifiClient) CreateDropRequest(connectionId string) (*nigoapi.DropReques // Request on Nifi Rest API to create the drop Request entity, rsp, body, err := client.FlowfileQueuesApi.CreateDropRequest(context, connectionId) - if err := errorCreateOperation(rsp, body, err, n.log); err != nil { + if err := errorUpdateOperation(rsp, body, err, n.log); err != nil { return nil, err } diff --git a/pkg/nificlient/flowfiles_test.go b/pkg/nificlient/flowfiles_test.go index 63eac08122..b5f6caeda6 100644 --- a/pkg/nificlient/flowfiles_test.go +++ b/pkg/nificlient/flowfiles_test.go @@ -18,7 +18,7 @@ func TestCreateDropRequest(t *testing.T) { 
"16cfd2ec-0174-1000-0000-00004b9b35cc", connectionId, "", "", "", 50, 10, 15, 5, false) - entity, err := testCreateDropRequest(t, &mockEntity, connectionId, 201) + entity, err := testCreateDropRequest(t, &mockEntity, connectionId, 202) assert.Nil(err) assert.NotNil(entity) diff --git a/pkg/nificlient/inputport.go b/pkg/nificlient/inputport.go index 0739349033..a3ad880554 100644 --- a/pkg/nificlient/inputport.go +++ b/pkg/nificlient/inputport.go @@ -21,3 +21,20 @@ func (n *nifiClient) UpdateInputPortRunStatus(id string, entity nigoapi.PortRunS return &processor, nil } + +func (n *nifiClient) GetInputPort(id string) (*nigoapi.PortEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. + client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to update the input port informations + port, rsp, body, err := client.InputPortsApi.GetInputPort(context, id) + if err := errorUpdateOperation(rsp, body, err, n.log); err != nil { + return nil, err + } + + return &port, nil +} diff --git a/pkg/nificlient/inputport_test.go b/pkg/nificlient/inputport_test.go index d613cdd89e..fba82851c2 100644 --- a/pkg/nificlient/inputport_test.go +++ b/pkg/nificlient/inputport_test.go @@ -10,27 +10,69 @@ import ( "github.com/stretchr/testify/assert" ) -func TestUpdatePortRunStatus(t *testing.T) { +func TestGetInputPort(t *testing.T) { + assert := assert.New(t) + + id := "41481c3b-a836-37fa-84d1-06e57a6dc2d8" + mockEntity := MockInputPort(id, "test-unit", "5eee3064-0183-1000-0000-00004b62d089", 0, 0) + + entity, err := testGetInputPort(t, id, &mockEntity, 200) + assert.Nil(err) + assert.NotNil(entity) + + entity, err = testGetInputPort(t, id, &mockEntity, 404) + assert.IsType(ErrNifiClusterReturned404, err) + assert.Nil(entity) + + entity, err = testGetInputPort(t, id, &mockEntity, 
500) + assert.IsType(ErrNifiClusterNotReturned200, err) + assert.Nil(entity) +} + +func testGetInputPort(t *testing.T, id string, entity *nigoapi.PortEntity, status int) (*nigoapi.PortEntity, error) { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return nil, err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/input-ports/%s", id)) + httpmock.RegisterResponder(http.MethodGet, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + return client.GetInputPort(id) +} + +func TestUpdateInputPortRunStatus(t *testing.T) { assert := assert.New(t) id := "16cfd2ec-0174-1000-0000-00004b9b35cc" - mockEntity := MockPortRunStatus("Stopped") + mockEntity := MockInputPortRunStatus("Stopped") - entity, err := testUpdatePortRunStatus(t, mockEntity, id, 200) + entity, err := testUpdateInputPortRunStatus(t, mockEntity, id, 200) assert.Nil(err) assert.NotNil(entity) - entity, err = testUpdatePortRunStatus(t, mockEntity, id, 404) + entity, err = testUpdateInputPortRunStatus(t, mockEntity, id, 404) assert.IsType(ErrNifiClusterReturned404, err) assert.Nil(entity) - entity, err = testUpdatePortRunStatus(t, mockEntity, id, 500) + entity, err = testUpdateInputPortRunStatus(t, mockEntity, id, 500) assert.IsType(ErrNifiClusterNotReturned200, err) assert.Nil(entity) } -func testUpdatePortRunStatus(t *testing.T, entity nigoapi.PortRunStatusEntity, id string, status int) (*nigoapi.ProcessorEntity, error) { +func testUpdateInputPortRunStatus(t *testing.T, entity nigoapi.PortRunStatusEntity, id string, status int) (*nigoapi.ProcessorEntity, error) { cluster := testClusterMock(t) @@ -53,10 +95,28 @@ func testUpdatePortRunStatus(t *testing.T, entity nigoapi.PortRunStatusEntity, i return client.UpdateInputPortRunStatus(id, entity) } -func MockPortRunStatus(state string) nigoapi.PortRunStatusEntity { +func 
MockInputPortRunStatus(state string) nigoapi.PortRunStatusEntity { var version int64 = 10 return nigoapi.PortRunStatusEntity{ Revision: &nigoapi.RevisionDto{Version: &version}, State: state, } } + +func MockInputPort(id, name, parentId string, posX, posY float64) nigoapi.PortEntity { + var version int64 = 10 + return nigoapi.PortEntity{ + Id: id, + Component: &nigoapi.PortDto{ + Id: id, + Name: name, + ParentGroupId: parentId, + Type_: "INPUT_PORT", + Position: &nigoapi.PositionDto{ + X: posX, + Y: posY, + }, + }, + Revision: &nigoapi.RevisionDto{Version: &version}, + } +} diff --git a/pkg/nificlient/outputport.go b/pkg/nificlient/outputport.go new file mode 100644 index 0000000000..f0f8cd975b --- /dev/null +++ b/pkg/nificlient/outputport.go @@ -0,0 +1,40 @@ +package nificlient + +import ( + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" + "go.uber.org/zap" +) + +func (n *nifiClient) UpdateOutputPortRunStatus(id string, entity nigoapi.PortRunStatusEntity) (*nigoapi.ProcessorEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. + client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to update the output port run status + processor, rsp, body, err := client.OutputPortsApi.UpdateRunStatus(context, id, entity) + if err := errorUpdateOperation(rsp, body, err, n.log); err != nil { + return nil, err + } + + return &processor, nil +} + +func (n *nifiClient) GetOutputPort(id string) (*nigoapi.PortEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. 
+ client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to update the output port informations + port, rsp, body, err := client.OutputPortsApi.GetOutputPort(context, id) + if err := errorUpdateOperation(rsp, body, err, n.log); err != nil { + return nil, err + } + + return &port, nil +} diff --git a/pkg/nificlient/outputport_test.go b/pkg/nificlient/outputport_test.go new file mode 100644 index 0000000000..5f85195848 --- /dev/null +++ b/pkg/nificlient/outputport_test.go @@ -0,0 +1,122 @@ +package nificlient + +import ( + "fmt" + "net/http" + "testing" + + "github.com/jarcoal/httpmock" + nigoapi "github.com/konpyutaika/nigoapi/pkg/nifi" + "github.com/stretchr/testify/assert" +) + +func TestGetOutputPort(t *testing.T) { + assert := assert.New(t) + + id := "41481c3b-a836-37fa-84d1-06e57a6dc2d8" + mockEntity := MockOutputPort(id, "test-unit", "5eee3064-0183-1000-0000-00004b62d089", 0, 0) + + entity, err := testGetOutputPort(t, id, &mockEntity, 200) + assert.Nil(err) + assert.NotNil(entity) + + entity, err = testGetOutputPort(t, id, &mockEntity, 404) + assert.IsType(ErrNifiClusterReturned404, err) + assert.Nil(entity) + + entity, err = testGetOutputPort(t, id, &mockEntity, 500) + assert.IsType(ErrNifiClusterNotReturned200, err) + assert.Nil(entity) +} + +func testGetOutputPort(t *testing.T, id string, entity *nigoapi.PortEntity, status int) (*nigoapi.PortEntity, error) { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return nil, err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/output-ports/%s", id)) + httpmock.RegisterResponder(http.MethodGet, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + 
return client.GetOutputPort(id) +} + +func TestUpdateOutputPortRunStatus(t *testing.T) { + assert := assert.New(t) + + id := "16cfd2ec-0174-1000-0000-00004b9b35cc" + + mockEntity := MockOutputPortRunStatus("Stopped") + + entity, err := testUpdateOutputPortRunStatus(t, mockEntity, id, 200) + assert.Nil(err) + assert.NotNil(entity) + + entity, err = testUpdateOutputPortRunStatus(t, mockEntity, id, 404) + assert.IsType(ErrNifiClusterReturned404, err) + assert.Nil(entity) + + entity, err = testUpdateOutputPortRunStatus(t, mockEntity, id, 500) + assert.IsType(ErrNifiClusterNotReturned200, err) + assert.Nil(entity) +} + +func testUpdateOutputPortRunStatus(t *testing.T, entity nigoapi.PortRunStatusEntity, id string, status int) (*nigoapi.ProcessorEntity, error) { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return nil, err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/output-ports/%s/run-status", id)) + httpmock.RegisterResponder(http.MethodPut, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + return client.UpdateOutputPortRunStatus(id, entity) +} + +func MockOutputPortRunStatus(state string) nigoapi.PortRunStatusEntity { + var version int64 = 10 + return nigoapi.PortRunStatusEntity{ + Revision: &nigoapi.RevisionDto{Version: &version}, + State: state, + } +} + +func MockOutputPort(id, name, parentId string, posX, posY float64) nigoapi.PortEntity { + var version int64 = 10 + return nigoapi.PortEntity{ + Id: id, + Component: &nigoapi.PortDto{ + Id: id, + Name: name, + ParentGroupId: parentId, + Type_: "OUTPUT_PORT", + Position: &nigoapi.PositionDto{ + X: posX, + Y: posY, + }, + }, + Revision: &nigoapi.RevisionDto{Version: &version}, + } +} diff --git a/pkg/nificlient/processgroup.go b/pkg/nificlient/processgroup.go index c3353cf7a4..48ae143fb0 100644 --- 
a/pkg/nificlient/processgroup.go +++ b/pkg/nificlient/processgroup.go @@ -83,3 +83,20 @@ func (n *nifiClient) RemoveProcessGroup(entity nigoapi.ProcessGroupEntity) error return errorDeleteOperation(rsp, body, err, n.log) } + +func (n *nifiClient) CreateConnection(entity nigoapi.ConnectionEntity) (*nigoapi.ConnectionEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. + client, context := n.privilegeCoordinatorClient() + if client == nil { + n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable)) + return nil, ErrNoNodeClientsAvailable + } + + // Request on Nifi Rest API to create a connection + conEntity, rsp, body, err := client.ProcessGroupsApi.CreateConnection(context, entity.Component.ParentGroupId, entity) + if err := errorCreateOperation(rsp, body, err, n.log); err != nil { + return nil, err + } + + return &conEntity, nil +} diff --git a/pkg/nificlient/processgroup_test.go b/pkg/nificlient/processgroup_test.go index 0592dee9a8..e724b84b8d 100644 --- a/pkg/nificlient/processgroup_test.go +++ b/pkg/nificlient/processgroup_test.go @@ -199,6 +199,51 @@ func testRemoveProcessGroup(t *testing.T, entity *nigoapi.ProcessGroupEntity, st return client.RemoveProcessGroup(*entity) } +func TestCreateConnection(t *testing.T) { + assert := assert.New(t) + + mockEntity := MockConnection("5f1f9f7e-0183-1000-ffff-ffffa1b9c8d5", "test-unit", "5a859dfd-0183-1000-b22b-680e3b6fb507", + "41481c3b-a836-37fa-84d1-06e57a6dc2d8", "OUTPUT_PORT", "5eee3064-0183-1000-0000-00004b62d089", + "b760a6ed-1421-37d6-813d-94cf7cb03524", "INPUT_PORT", "5eee26c7-0183-1000-ffff-fffffc99fdef", + "1 hour", "10 GB", "DO_NOT_LOAD_BALANCE", "", "DO_NOT_COMPRESS", + 1000, []string{}, 0, []nigoapi.PositionDto{{X: 0, Y: 0}}) + + entity, err := testCreateConnection(t, &mockEntity, 201) + assert.Nil(err) + assert.NotNil(entity) + + entity, err = testCreateConnection(t, &mockEntity, 404) + assert.IsType(ErrNifiClusterReturned404, err) + 
assert.Nil(entity) + + entity, err = testCreateConnection(t, &mockEntity, 500) + assert.IsType(ErrNifiClusterNotReturned200, err) + assert.Nil(entity) +} + +func testCreateConnection(t *testing.T, entity *nigoapi.ConnectionEntity, status int) (*nigoapi.ConnectionEntity, error) { + + cluster := testClusterMock(t) + + client, err := testClientFromCluster(cluster, false) + if err != nil { + return nil, err + } + + httpmock.Activate() + defer httpmock.DeactivateAndReset() + + url := nifiAddress(cluster, fmt.Sprintf("/process-groups/%s/connections", entity.Component.ParentGroupId)) + httpmock.RegisterResponder(http.MethodPost, url, + func(req *http.Request) (*http.Response, error) { + return httpmock.NewJsonResponse( + status, + entity) + }) + + return client.CreateConnection(*entity) +} + func MockProcessGroup(id, name, parentPGId, registryId, bucketId, flowId string, flowVersion int32) nigoapi.ProcessGroupEntity { var version int64 = 10 return nigoapi.ProcessGroupEntity{ diff --git a/pkg/nificlient/processor.go b/pkg/nificlient/processor.go index 65631b4d95..543abcca81 100644 --- a/pkg/nificlient/processor.go +++ b/pkg/nificlient/processor.go @@ -24,3 +24,20 @@ func (n *nifiClient) UpdateProcessorRunStatus( return &processor, nil } + +func (n *nifiClient) GetProcessor(id string) (*nigoapi.ProcessorEntity, error) { + // Get nigoapi client, favoring the one associated to the coordinator node. 
+	client, context := n.privilegeCoordinatorClient()
+	if client == nil {
+		n.log.Error("Error during creating node client", zap.Error(ErrNoNodeClientsAvailable))
+		return nil, ErrNoNodeClientsAvailable
+	}
+
+	// Request on Nifi Rest API to get the processor informations
+	processor, rsp, body, err := client.ProcessorsApi.GetProcessor(context, id)
+	if err := errorUpdateOperation(rsp, body, err, n.log); err != nil {
+		return nil, err
+	}
+
+	return &processor, nil
+}
diff --git a/pkg/plugin/common/client.go b/pkg/plugin/common/client.go
new file mode 100644
index 0000000000..643a0d336c
--- /dev/null
+++ b/pkg/plugin/common/client.go
@@ -0,0 +1,56 @@
+package common
+
+import (
+	"fmt"
+
+	"github.com/konpyutaika/nifikop/api/v1alpha1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+	"k8s.io/client-go/tools/clientcmd"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// NewClient returns a new controller-runtime client instance
+func NewClient(clientConfig clientcmd.ClientConfig) (client.Client, error) {
+	restConfig, err := clientConfig.ClientConfig()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get rest client config: %w", err)
+	}
+
+	// Create the mapper provider
+	mapper, err := apiutil.NewDiscoveryRESTMapper(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to instantiate mapper: %w", err)
+	}
+
+	// Register NifiKop scheme
+	if err = v1alpha1.AddToScheme(scheme.Scheme); err != nil {
+		return nil, fmt.Errorf("unable to register NifiKop apis: %w", err)
+	}
+
+	// Create the Client for Read/Write operations.
+ var newClient client.Client + newClient, err = client.New(restConfig, client.Options{Scheme: scheme.Scheme, Mapper: mapper}) + if err != nil { + return nil, fmt.Errorf("unable to instantiate client: %w", err) + } + + return newClient, nil +} + +// NewClientset returns a new client-go instance +func NewClientset(clientConfig clientcmd.ClientConfig) (*kubernetes.Clientset, error) { + restConfig, err := clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("unable to get rest client config: %w", err) + } + + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("unable to instantiate client: %w", err) + } + + return clientset, nil +} diff --git a/pkg/plugin/common/options.go b/pkg/plugin/common/options.go new file mode 100644 index 0000000000..f7b45ddce5 --- /dev/null +++ b/pkg/plugin/common/options.go @@ -0,0 +1,79 @@ +package common + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Options encapsulates the common fields of command options +type Options struct { + ConfigFlags *genericclioptions.ConfigFlags + Client client.Client + Clientset *kubernetes.Clientset + UserNamespace string +} + +// Init initialize the common config of command options +func (o *Options) Init(cmd *cobra.Command) error { + clientConfig := o.GetClientConfig() + + client, err := NewClient(clientConfig) + if err != nil { + return fmt.Errorf("unable to instantiate client: %w", err) + } + o.SetClient(client) + + clientset, err := NewClientset(clientConfig) + if err != nil { + return fmt.Errorf("unable to instantiate clientset: %w", err) + } + o.SetClientset(clientset) + + nsConfig, _, err := clientConfig.Namespace() + if err != nil { + return err + } + + nsFlag, err := cmd.Flags().GetString("namespace") + if err != nil { + return err + } + + if nsFlag != "" { + 
o.SetNamespace(nsFlag) + } else { + o.SetNamespace(nsConfig) + } + + return nil +} + +// SetNamespace configures the namespace +func (o *Options) SetNamespace(ns string) { + o.UserNamespace = ns +} + +// SetClient configures the client +func (o *Options) SetClient(client client.Client) { + o.Client = client +} + +// SetClientset configures the clientset +func (o *Options) SetClientset(clientset *kubernetes.Clientset) { + o.Clientset = clientset +} + +// GetClientConfig returns the client config +func (o *Options) GetClientConfig() clientcmd.ClientConfig { + return o.ConfigFlags.ToRawKubeConfigLoader() +} + +// SetConfigFlags configures the config flags +func (o *Options) SetConfigFlags() { + o.ConfigFlags = genericclioptions.NewConfigFlags(false) +} diff --git a/pkg/util/nifi/common.go b/pkg/util/nifi/common.go index 0387147a5c..b7c60bcc50 100644 --- a/pkg/util/nifi/common.go +++ b/pkg/util/nifi/common.go @@ -22,6 +22,17 @@ var ( NifiDataVolumeMountKey = fmt.Sprintf("%s/nifi-data", v1.GroupVersion.Group) ) +var ( + StopInputPortLabel = fmt.Sprintf("%s/stop-input", v1.GroupVersion.Group) + StopOutputPortLabel = fmt.Sprintf("%s/stop-output", v1.GroupVersion.Group) + ForceStopLabel = fmt.Sprintf("%s/force-stop", v1.GroupVersion.Group) + ForceStartLabel = fmt.Sprintf("%s/force-start", v1.GroupVersion.Group) +) + +var ( + LastAppliedClusterAnnotation = fmt.Sprintf("%s/last-applied-nificluster", v1.GroupVersion.Group) +) + // ParseTimeStampToUnixTime parses the given CC timeStamp to time format func ParseTimeStampToUnixTime(timestamp string) (time.Time, error) { diff --git a/pkg/util/util.go b/pkg/util/util.go index bc322b2e58..4ae9d0c0cb 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -141,6 +141,32 @@ func ConvertMapStringToMapStringPointer(inputMap map[string]string) map[string]* return result } +// StringSliceCompare returns true if the two lists of string are the same +func StringSliceCompare(list1 []string, list2 []string) bool { + if len(list1) != 
len(list2) { + return false + } + for _, v := range list1 { + if !StringSliceContains(list2, v) { + return false + } + } + return true +} + +// StringSliceStrictCompare returns true if the two lists of string are the same with the order taking into account +func StringSliceStrictCompare(list1 []string, list2 []string) bool { + if len(list1) != len(list2) { + return false + } + for i, v := range list1 { + if list2[i] != v { + return false + } + } + return true +} + // StringSliceContains returns true if list contains s func StringSliceContains(list []string, s string) bool { for _, v := range list { diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go index f10e929083..fc4e2439d9 100644 --- a/pkg/util/util_test.go +++ b/pkg/util/util_test.go @@ -1,9 +1,10 @@ package util import ( - "github.com/konpyutaika/nifikop/api/v1" - corev1 "k8s.io/api/core/v1" "testing" + + v1 "github.com/konpyutaika/nifikop/api/v1" + corev1 "k8s.io/api/core/v1" ) func TestSubtractNodes(t *testing.T) { @@ -108,3 +109,102 @@ func TestMergeHostAliasesEmpty(t *testing.T) { t.Errorf("The merge results are not the correct length: %v+", results) } } + +func TestStringSliceCompare(t *testing.T) { + listOriginal := []string{"a", "b", "c"} + listDisordered := []string{"c", "a", "b"} + listLess := []string{"a", "b"} + listMore := []string{"a", "b", "c", "d"} + listDifferent := []string{"1", "2", "3"} + + // same list + if results := StringSliceCompare(listOriginal, listOriginal); !results { + t.Error("The list must be considered as equal") + } + + // same list but disordered + if results := StringSliceCompare(listOriginal, listDisordered); !results { + t.Error("The list must be considered as equal") + } + + // list with less listLess + if results := StringSliceCompare(listOriginal, listLess); results { + t.Error("The list must be considered as different because there is less items") + } + + // list with more items + if results := StringSliceCompare(listOriginal, listMore); results { + 
t.Error("The list must be considered as different because there is more items")
+	}
+
+	// list of same size but different
+	if results := StringSliceCompare(listOriginal, listDifferent); results {
+		t.Error("The list must be considered as different because there is no identical items")
+	}
+}
+
+func TestStringSliceStrictCompare(t *testing.T) {
+	listOriginal := []string{"a", "b", "c"}
+	listDisordered := []string{"c", "a", "b"}
+	listLess := []string{"a", "b"}
+	listMore := []string{"a", "b", "c", "d"}
+	listDifferent := []string{"1", "2", "3"}
+
+	// same list
+	if results := StringSliceStrictCompare(listOriginal, listOriginal); !results {
+		t.Error("The list must be considered as equal")
+	}
+
+	// same list but disordered
+	if results := StringSliceStrictCompare(listOriginal, listDisordered); results {
+		t.Error("The list must be considered as different because the order is different")
+	}
+
+	// list with fewer items
+	if results := StringSliceStrictCompare(listOriginal, listLess); results {
+		t.Error("The list must be considered as different because there is less items")
+	}
+
+	// list with more items
+	if results := StringSliceStrictCompare(listOriginal, listMore); results {
+		t.Error("The list must be considered as different because there is more items")
+	}
+
+	// list of same size but different
+	if results := StringSliceStrictCompare(listOriginal, listDifferent); results {
+		t.Error("The list must be considered as different because there is no identical items")
+	}
+}
+
+func TestStringSliceContains(t *testing.T) {
+	list := []string{"a", "b", "c"}
+
+	// item in the list
+	if results := StringSliceContains(list, "a"); !results {
+		t.Error("The item is in the list")
+	}
+
+	// item not in the list
+	if results := StringSliceContains(list, "1"); results {
+		t.Error("The item is not in the list")
+	}
+}
+
+func TestStringSliceRemove(t *testing.T) {
+	list := []string{"a", "b", "c"}
+	listCopy := make([]string, len(list))
+
+	copy(listCopy, list)
+	
// item in the list + if results := StringSliceRemove(listCopy, "a"); len(results) != len(list)-1 || + results[0] != list[1] || results[1] != list[2] { + t.Error("The list must have an item less") + } + + copy(listCopy, list) + // item not in the list + if results := StringSliceRemove(listCopy, "1"); len(results) != len(list) || + results[0] != list[0] || results[1] != list[1] || results[2] != list[2] { + t.Error("The list should be the same") + } +} diff --git a/site/docs/1_concepts/2_design_principles.md b/site/docs/1_concepts/2_design_principles.md index dd1fa59c06..7c638ab638 100644 --- a/site/docs/1_concepts/2_design_principles.md +++ b/site/docs/1_concepts/2_design_principles.md @@ -36,7 +36,7 @@ The operator should reflect as much as possible the behavior of the solution we This means that your cluster is not defined by what is deployed on it, and what you deploy on it does not depend on it. To be more explicit, just because I deploy a NiFi cluster doesn't mean the DataFlow deployed on it will stay there, we can move the DataFlow from one cluster to another. -To manage this, we need to create different kinds of resources ([NifiCluster], [NifiDataflow], [NifiParameterContext], [NifiUser], [NifiUserGroup], [NifiRegistryClient], [NifiNodeGroupAutoscaler]) with one controller per resource that will manage its own resource. +To manage this, we need to create different kinds of resources ([NifiCluster], [NifiDataflow], [NifiParameterContext], [NifiUser], [NifiUserGroup], [NifiRegistryClient], [NifiNodeGroupAutoscaler], [NifiConnection]) with one controller per resource that will manage its own resource. 
In this way, we can say: - I deploy a NiFiCluster @@ -49,4 +49,5 @@ In this way, we can say: [NifiUser]: ../5_references/2_nifi_user [NifiUserGroup]: ../5_references/6_nifi_usergroup [NifiRegistryClient]: ../5_references/3_nifi_registry_client -[NifiNodeGroupAutoscaler]: ../5_references/7_nifi_nodegroup_autoscaler \ No newline at end of file +[NifiNodeGroupAutoscaler]: ../5_references/7_nifi_nodegroup_autoscaler +[NifiConnection]: ../5_references/8_nifi_connection \ No newline at end of file diff --git a/site/docs/2_deploy_nifikop/2_customizable_install_with_helm.md b/site/docs/2_deploy_nifikop/2_customizable_install_with_helm.md index d6ae015acf..753dd8697e 100644 --- a/site/docs/2_deploy_nifikop/2_customizable_install_with_helm.md +++ b/site/docs/2_deploy_nifikop/2_customizable_install_with_helm.md @@ -26,6 +26,7 @@ It will use Custom Ressources Definition CRDs: - `nifiparametercontexts.nifi.konpyutaika.com`, - `nifidataflows.nifi.konpyutaika.com`, - `nifinodegroupautoscalers.nifi.konpyutaika.com`, +- `nificonnections.nifi.konpyutaika.com`, ### Configuration @@ -80,6 +81,7 @@ kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/co kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiparametercontexts.yaml kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifiregistryclients.yaml kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nifinodegroupautoscalers.yaml +kubectl apply -f https://raw.githubusercontent.com/konpyutaika/nifikop/master/config/crd/bases/nifi.konpyutaika.com_nificonnections.yaml ``` ::: diff --git a/site/docs/2_deploy_nifikop/3_kubectl_plugin.md b/site/docs/2_deploy_nifikop/3_kubectl_plugin.md new file mode 100644 index 0000000000..ee5ac8286f --- /dev/null +++ b/site/docs/2_deploy_nifikop/3_kubectl_plugin.md @@ -0,0 +1,34 @@ +--- 
+id: 3_kubectl_plugin
+title: Kubectl Plugin
+sidebar_label: Kubectl Plugin
+---
+
+You can build the plugin and copy the executable into your PATH.
+
+For example on a UNIX machine:
+
+```console
+make kubectl-nifikop && sudo cp ./bin/kubectl-nifikop /usr/local/bin
+```
+
+Then you can test the plugin:
+
+```console
+$ kubectl nifikop
+Usage:
+  nifikop [command]
+
+Available Commands:
+  completion  Generate the autocompletion script for the specified shell
+  help        Help about any command
+  nificluster
+  nificonnection
+  nifidataflow
+  nifigroupautoscaler
+  nifiregistryclient
+  nifiuser
+  nifiusergroup
+```
+
+Your NiFiKop plugin is now installed!
\ No newline at end of file
diff --git a/site/docs/3_manage_nifi/3_manage_dataflows/1_deploy_dataflow.md b/site/docs/3_manage_nifi/3_manage_dataflows/1_deploy_dataflow.md
index ce2f053cc0..c5001aa1b5 100644
--- a/site/docs/3_manage_nifi/3_manage_dataflows/1_deploy_dataflow.md
+++ b/site/docs/3_manage_nifi/3_manage_dataflows/1_deploy_dataflow.md
@@ -34,7 +34,7 @@ Once you have deployed your [NifiRegistryClient], you have the possibility of de
 
 This configuration is defined using the [NifiParameterContext] CRD, which NiFiKop will convert into a [Parameter context](https://nifi.apache.org/docs/nifi-docs/html/user-guide.html#parameter-contexts).
 
-Below is an example of [NifiParameterContext] :
+Below is an example of [NifiParameterContext]:
 
 ```yaml
 apiVersion: nifi.konpyutaika.com/v1
@@ -118,7 +118,7 @@ You have two modes of control from your dataflow by the operator :
 3 - `Spec.SyncMode == always` : The operator will deploy and ensure the dataflow lifecycle, it will avoid all manual modification directly from the Cluster (e.g remove the process group, remove the versioning, update the parent process group, make some local changes ...). If you want to perform update, rollback or stuff like this, you have to simply update the [NifiDataflow] resource.
:::important -More information about `Spec.UpdateStrategy` [here](../../5_references/5_nifi_dataflow#dataflowupdatestrategy) +More information about `Spec.UpdateStrategy` [here](../../5_references/5_nifi_dataflow#componentupdatestrategy) ::: [NifiDataflow]: ../../5_references/5_nifi_dataflow diff --git a/site/docs/3_manage_nifi/4_manage_connections/1_deploy_connection.md b/site/docs/3_manage_nifi/4_manage_connections/1_deploy_connection.md new file mode 100644 index 0000000000..77b36d420e --- /dev/null +++ b/site/docs/3_manage_nifi/4_manage_connections/1_deploy_connection.md @@ -0,0 +1,127 @@ +--- +id: 1_deploy_connection +title: Deploy connection +sidebar_label: Deploy connection +--- + +You can create NiFi connections either : + +* directly against the cluster through its REST API (using UI or some home made scripts), or +* via the `NifiConnection` CRD. + +To deploy a [NifiConnection] you have to start by deploying at least 2 [NifiDataflows] because **NiFiKop** manages connection between 2 [NifiDataflows]. + +If you want more details about how to deploy [NifiDataflow], just have a look on the [how to deploy dataflow page](../3_manage_dataflows/1_deploy_dataflow). 
+ +Below is an example of 2 [NifiDataflows] named respectively `input` and `output`: + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiDataflow +metadata: + name: input + namespace: nifikop +spec: + clusterRef: + name: nc + namespace: nifikop + bucketId: deedb9f6-65a4-44e9-a1c9-722008fcd0ba + flowId: ab95431d-980d-41bd-904a-fac4bd864ba6 + flowVersion: 1 + registryClientRef: + name: registry-client-example + namespace: nifikop + skipInvalidComponent: true + skipInvalidControllerService: true + syncMode: always + updateStrategy: drain + flowPosition: + posX: 0 + posY: 0 +--- +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiDataflow +metadata: + name: output + namespace: nifikop +spec: + clusterRef: + name: nc + namespace: nifikop + bucketId: deedb9f6-65a4-44e9-a1c9-722008fcd0ba + flowId: fc5363eb-801e-432f-aa94-488838674d07 + flowVersion: 2 + registryClientRef: + name: registry-client-example + namespace: nifikop + skipInvalidComponent: true + skipInvalidControllerService: true + syncMode: always + updateStrategy: drain + flowPosition: + posX: 750 + posY: 0 +``` + +We will obtain the following initial setup: +![Initial setup](/img/3_tasks/4_manage_connections/1_deploy_connections/initial_setup.jpg) + +:::important +The `input` dataflow must have an `output port` and the `output` dataflow must have an `input port`. +::: + +Now that we have 2 [NifiDataflows], we can connect them with a [NifiConnection]. 
+ +Below is an example of a [NifiConnection] named `connection` between the 2 previously deployed dataflows: + +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiConnection +metadata: + name: connection + namespace: nifikop +spec: + source: + name: input + namespace: nifikop + subName: output + type: dataflow + destination: + name: output + namespace: nifikop + subName: input + type: dataflow + configuration: + backPressureDataSizeThreshold: 100 GB + backPressureObjectThreshold: 10000 + flowFileExpiration: 1 hour + labelIndex: 0 + bends: + - posX: 550 + posY: 550 + - posX: 550 + posY: 440 + - posX: 550 + posY: 88 + updateStrategy: drain +``` + +You will obtain the following setup: +![Connection setup](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_setup.jpg) + +The `prioritizers` field takes a list of prioritizers, and the order of the list matters in NiFi so it matters in the resource. + +- `prioriters=[NewestFlowFileFirstPrioritizer, FirstInFirstOutPrioritizer, OldestFlowFileFirstPrioritizer]` ![Connection prioritizers 0](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_0.jpg) +- `prioriters=[FirstInFirstOutPrioritizer, NewestFlowFileFirstPrioritizer, OldestFlowFileFirstPrioritizer]` ![Connection prioritizers 1](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_0.jpg) +- `prioriters=[PriorityAttributePrioritizer]` ![Connection prioritizers 2](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_0.jpg) + +The `labelIndex` field will place the label of the connection according to the bends. 
+If we take the previous bending configuration, you will get this setup for these labelIndex: + +- `labelIndex=0`: ![Connection labelIndex 0](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_0.jpg) +- `labelIndex=1`: ![Connection labelIndex 1](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_1.jpg) +- `labelIndex=2`: ![Connection labelIndex 2](/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_2.jpg) + +[NifiDataflow]: ../../5_references/5_nifi_dataflow +[NifiDataflows]: ../../5_references/5_nifi_dataflow +[NifiConnection]: ../../5_references/8_nifi_connection \ No newline at end of file diff --git a/site/docs/4_compatibility_versions.md b/site/docs/4_compatibility_versions.md index 8ba796e802..21a939a229 100644 --- a/site/docs/4_compatibility_versions.md +++ b/site/docs/4_compatibility_versions.md @@ -13,7 +13,7 @@ sidebar_label: Compatibility versions |----------------------------|---------|-----------| | Cluster deployment | Yes | Yes | | Standalone deployment | No | No | -| Cluster nodes configuration | Yes | Yes | +| Cluster nodes configuration| Yes | Yes | | Cluster rolling upgrade | Yes | Yes | | Cluster scaling | Yes | Yes | | Cluster auto-scaling | Yes | Yes | @@ -68,6 +68,6 @@ sidebar_label: Compatibility versions | Feature | NiFi 1.16 | NiFi 1.17 | |------------------------------|-----------|-----------| -| Connection deployment | No | No | -| Connection cluster migration | No | No | +| Connection deployment | Yes | Yes | +| Connection cluster migration | Yes | Yes | | Connection multi cluster | No | No | \ No newline at end of file diff --git a/site/docs/5_references/4_nifi_parameter_context.md b/site/docs/5_references/4_nifi_parameter_context.md index 0b711efc7a..8d3256c582 100644 --- a/site/docs/5_references/4_nifi_parameter_context.md +++ b/site/docs/5_references/4_nifi_parameter_context.md @@ -52,8 +52,8 @@ spec: |Field|Type|Description|Required|Default| 
|-----|----|-----------|--------|--------| |metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects parameter contexts must create.|No|nil| -|spec|[NifiParameterContextSpec](#NifiParameterContextspec)|defines the desired state of NifiParameterContext.|No|nil| -|status|[NifiParameterContextStatus](#NifiParameterContextstatus)|defines the observed state of NifiParameterContext.|No|nil| +|spec|[NifiParameterContextSpec](#nifiparametercontextspec)|defines the desired state of NifiParameterContext.|No|nil| +|status|[NifiParameterContextStatus](#nifiparametercontextstatus)|defines the observed state of NifiParameterContext.|No|nil| ## NifiParameterContextsSpec diff --git a/site/docs/5_references/5_nifi_dataflow.md b/site/docs/5_references/5_nifi_dataflow.md index 50052ca78b..866d768c73 100644 --- a/site/docs/5_references/5_nifi_dataflow.md +++ b/site/docs/5_references/5_nifi_dataflow.md @@ -39,8 +39,8 @@ spec: |Field|Type|Description|Required|Default| |-----|----|-----------|--------|--------| |metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects dataflows must create.|No|nil| -|spec|[NifiDataflowSpec](#NifiDataflowspec)|defines the desired state of NifiDataflow.|No|nil| -|status|[NifiDataflowStatus](#NifiDataflowstatus)|defines the observed state of NifiDataflow.|No|nil| +|spec|[NifiDataflowSpec](#nifidataflowspec)|defines the desired state of NifiDataflow.|No|nil| +|status|[NifiDataflowStatus](#nifidataflowstatus)|defines the observed state of NifiDataflow.|No|nil| ## NifiDataflowsSpec @@ -55,7 +55,7 @@ spec: |syncMode|Enum={"never","always","once"}|if the flow will be synchronized once, continuously or never. 
|No| always | |skipInvalidControllerService|bool|whether the flow is considered as ran if some controller services are still invalid or not. |Yes| false | |skipInvalidComponent|bool|whether the flow is considered as ran if some components are still invalid or not. |Yes| false | -|updateStrategy|[DataflowUpdateStrategy](#dataflowupdatestrategy)|describes the way the operator will deal with data when a dataflow will be updated : Drop or Drain |Yes| drain | +|updateStrategy|[ComponentUpdateStrategy](#componentupdatestrategy)|describes the way the operator will deal with data when a dataflow will be updated : Drop or Drain |Yes| drain | |clusterRef|[ClusterReference](./2_nifi_user#clusterreference)| contains the reference to the NifiCluster with the one the user is linked. |Yes| - | |parameterContextRef|[ParameterContextReference](./4_nifi_parameter_context#parametercontextreference)| contains the reference to the ParameterContext with the one the dataflow is linked. |No| - | |registryClientRef|[RegistryClientReference](./3_nifi_registry_client#registryclientreference)| contains the reference to the NifiRegistry with the one the dataflow is linked. |Yes| - | @@ -69,7 +69,7 @@ spec: |latestUpdateRequest|[UpdateRequest](#updaterequest)|the latest update request sent. |Yes| - | |latestDropRequest|[DropRequest](#droprequest)|the latest queue drop request sent. |Yes| - | -## DataflowUpdateStrategy +## ComponentUpdateStrategy |Name|Value|Description| |-----|----|------------| diff --git a/site/docs/5_references/8_nifi_connection.md b/site/docs/5_references/8_nifi_connection.md new file mode 100644 index 0000000000..418a931d71 --- /dev/null +++ b/site/docs/5_references/8_nifi_connection.md @@ -0,0 +1,151 @@ +--- +id: 8_nifi_connection +title: NiFi Connection +sidebar_label: NiFi Connection +--- + +`NifiConnection` is the Schema for the NiFi connection API. 
+ +```yaml +apiVersion: nifi.konpyutaika.com/v1alpha1 +kind: NifiConnection +metadata: + name: connection + namespace: instances +spec: + source: + name: input + namespace: instances + subName: output_1 + type: dataflow + destination: + name: output + namespace: instances + subName: input_1 + type: dataflow + configuration: + flowFileExpiration: 1 hour + backPressureDataSizeThreshold: 100 GB + backPressureObjectThreshold: 10000 + loadBalanceStrategy: PARTITION_BY_ATTRIBUTE + loadBalancePartitionAttribute: partition_attribute + loadBalanceCompression: DO_NOT_COMPRESS + prioritizers: + - NewestFlowFileFirstPrioritizer + - FirstInFirstOutPrioritizer + labelIndex: 0 + bends: + - posX: 550 + posY: 550 + - posX: 550 + posY: 440 + - posX: 550 + posY: 88 + updateStrategy: drain +``` + +## NifiConnection + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|-------| +|metadata|[ObjectMetadata](https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#ObjectMeta)|is metadata that all persisted resources must have, which includes all objects connections must create.|No|nil| +|spec|[NifiConnectionSpec](#nificonnectionspec)|defines the desired state of NifiConnection.|No|nil| +|status|[NifiConnectionStatus](#nificonnectionstatus)|defines the observed state of NifiConnection.|No|nil| + +## NifiConnectionSpec + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|-------| +|source|[ComponentReference](#componentreference)|the Source component of the connection. |Yes| - | +|destination|[ComponentReference](#componentreference)|the Destination component of the connection. |Yes| - | +|configuration|[ConnectionConfiguration](#connectionconfiguration)|the configuration of the connection. 
|Yes| - | +|updateStrategy|[ComponentUpdateStrategy](#componentupdatestrategy)|describes the way the operator will deal with data when a connection will be deleted: Drop or Drain |Yes| drain | + +## NifiConnectionStatus + +|Field|Type|Description|Required|Default| +|-----|----|-----------|--------|-------| +|connectionID|string| connection ID. |Yes| - | +|state|[ConnectionState](#connectionstate)| the connection current state. |Yes| - | + +## ComponentUpdateStrategy + +|Name|Value|Description| +|----|-----|-----------| +|DrainStrategy|drain|leads to block stopping of input/output component until they are empty.| +|DropStrategy|drop|leads to dropping all flowfiles from the connection.| + +## ConnectionState + +|Name|Value|Description| +|----|-----|-----------| +|ConnectionStateCreated|Created|describes the status of a NifiConnection as created.| +|ConnectionStateOutOfSync|OutOfSync|describes the status of a NifiConnection as out of sync.| +|ConnectionStateInSync|InSync|describes the status of a NifiConnection as in sync.| + +## ComponentReference + +|Name|Value|Description|Required|Default| +|----|-----|-----------|--------|-------| +|name|string|the name of the component.|Yes| - | +|namespace|string|the namespace of the component.|Yes| - | +|type|[ComponentType](#componenttype)|the type of the component (e.g. nifidataflow).|Yes| - | +|subName|string|the name of the sub component (e.g. queue or port name).|No| - | + +## ComponentType + +|Name|Value|Description| +|----|-----|-----------| +|ComponentDataflow|dataflow|indicates that the component is a NifiDataflow.| +|ComponentInputPort|input-port|indicates that the component is a NifiInputPort. **(not implemented)**| +|ComponentOutputPort|output-port|indicates that the component is a NifiOutputPort. **(not implemented)**| +|ComponentProcessor|processor|indicates that the component is a NifiProcessor. **(not implemented)**| +|ComponentFunnel|funnel|indicates that the component is a NifiFunnel. 
**(not implemented)**| +|ComponentProcessGroup|process-group|indicates that the component is a NifiProcessGroup. **(not implemented)**| + +## ConnectionConfiguration + +|Name|Value|Description|Required|Default| +|----|-----|-----------|--------|-------| +|flowFileExpiration|string|the maximum amount of time an object may be in the flow before it will be automatically aged out of the flow.|No| - | +|backPressureDataSizeThreshold|string|the maximum data size of objects that can be queued before back pressure is applied.|No| 1 GB | +|backPressureObjectThreshold|*int64|the maximum number of objects that can be queued before back pressure is applied.|No| 10000 | +|loadBalanceStrategy|[ConnectionLoadBalanceStrategy](#connectionloadbalancestrategy)|how to load balance the data in this Connection across the nodes in the cluster.|No| DO_NOT_LOAD_BALANCE | +|loadBalancePartitionAttribute|string|the FlowFile Attribute to use for determining which node a FlowFile will go to.|No| - | +|loadBalanceCompression|[ConnectionLoadBalanceCompression](#connectionloadbalancecompression)|whether or not data should be compressed when being transferred between nodes in the cluster.|No| DO_NOT_COMPRESS | +|prioritizers|\[ \][ConnectionPrioritizer](#connectionprioritizer)|the comparators used to prioritize the queue.|No| - | +|labelIndex|*int32|the index of the bend point where to place the connection label.|No| - | +|bends|\[ \][ConnectionBend](#connectionbend)|the bend points on the connection.|No| - | + +## ConnectionLoadBalanceStrategy + +|Name|Value|Description| +|----|-----|-----------| +|StrategyDoNotLoadBalance|DO_NOT_LOAD_BALANCE|do not load balance FlowFiles between nodes in the cluster.| +|StrategyPartitionByAttribute|PARTITION_BY_ATTRIBUTE|determine which node to send a given FlowFile to based on the value of a user-specified FlowFile Attribute. 
All FlowFiles that have the same value for said Attribute will be sent to the same node in the cluster.| +|StrategyRoundRobin|ROUND_ROBIN|flowFiles will be distributed to nodes in the cluster in a Round-Robin fashion. However, if a node in the cluster is not able to receive data as fast as other nodes, that node may be skipped in one or more iterations in order to maximize throughput of data distribution across the cluster.| +|StrategySingle|SINGLE|all FlowFiles will be sent to the same node. Which node they are sent to is not defined.| + +## ConnectionLoadBalanceCompression + +|Name|Value|Description| +|----|-----|-----------| +|CompressionDoNotCompress|DO_NOT_COMPRESS|flowFiles will not be compressed.| +|CompressionCompressAttributesOnly|COMPRESS_ATTRIBUTES_ONLY|flowFiles' attributes will be compressed, but the flowFiles' contents will not be.| +|CompressionCompressAttributesAndContent|COMPRESS_ATTRIBUTES_AND_CONTENT|flowFiles' attributes and content will be compressed.| + +## ConnectionPrioritizer + +|Name|Value|Description| +|----|-----|-----------| +|PrioritizerFirstInFirstOutPrioritizer|FirstInFirstOutPrioritizer|given two FlowFiles, the one that reached the connection first will be processed first.| +|PrioritizerNewestFlowFileFirstPrioritizer|NewestFlowFileFirstPrioritizer|given two FlowFiles, the one that is newest in the dataflow will be processed first.| +|PrioritizerOldestFlowFileFirstPrioritizer|OldestFlowFileFirstPrioritizer|given two FlowFiles, the one that is oldest in the dataflow will be processed first. 'This is the default scheme that is used if no prioritizers are selected'.| +|PrioritizerPriorityAttributePrioritizer|PriorityAttributePrioritizer|given two FlowFiles, an attribute called “priority” will be extracted. 
The one that has the lowest priority value will be processed first.| + +## ConnectionBend + +|Name|Value|Description|Required|Default| +|----|-----|-----------|--------|-------| +|posX|*int64|the x coordinate.|No| - | +|posY|*int64|the y coordinate.|No| - | diff --git a/site/website/sidebars.json b/site/website/sidebars.json index 18cd94a7a9..0d18360909 100644 --- a/site/website/sidebars.json +++ b/site/website/sidebars.json @@ -15,7 +15,8 @@ "label": "Deploy NiFiKop", "items": [ "2_deploy_nifikop/1_quick_start", - "2_deploy_nifikop/2_customizable_install_with_helm" + "2_deploy_nifikop/2_customizable_install_with_helm", + "2_deploy_nifikop/3_kubectl_plugin" ] }, { @@ -92,6 +93,13 @@ "3_manage_nifi/3_manage_dataflows/0_design_principles", "3_manage_nifi/3_manage_dataflows/1_deploy_dataflow" ] + }, + { + "type": "category", + "label": "Manage Connections", + "items": [ + "3_manage_nifi/4_manage_connections/1_deploy_connection" + ] } ] }, @@ -118,7 +126,8 @@ "5_references/4_nifi_parameter_context", "5_references/5_nifi_dataflow", "5_references/6_nifi_usergroup", - "5_references/7_nifi_nodegroup_autoscaler" + "5_references/7_nifi_nodegroup_autoscaler", + "5_references/8_nifi_connection" ] }, { diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_0.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_0.jpg new file mode 100644 index 0000000000..a48b20a7a0 Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_0.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_1.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_1.jpg new file mode 100644 index 0000000000..837ae6beac Binary files /dev/null and 
b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_1.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_2.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_2.jpg new file mode 100644 index 0000000000..a1426cb29f Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_labelindex_2.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_0.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_0.jpg new file mode 100644 index 0000000000..064dbaa16a Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_0.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_1.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_1.jpg new file mode 100644 index 0000000000..da2abf6818 Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_1.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_2.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_2.jpg new file mode 100644 index 0000000000..ad7e4c3093 Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_prioritizers_2.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_setup.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_setup.jpg new file mode 100644 index 
0000000000..f7c5636b59 Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/connection_setup.jpg differ diff --git a/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/initial_setup.jpg b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/initial_setup.jpg new file mode 100644 index 0000000000..ab9009e512 Binary files /dev/null and b/site/website/static/img/3_tasks/4_manage_connections/1_deploy_connections/initial_setup.jpg differ