diff --git a/apis/kubedb/v1alpha2/constants.go b/apis/kubedb/v1alpha2/constants.go index b88577ab09..fad6318b5a 100644 --- a/apis/kubedb/v1alpha2/constants.go +++ b/apis/kubedb/v1alpha2/constants.go @@ -519,11 +519,14 @@ const ( KafkaBrokerClientPortName = "broker" KafkaControllerClientPortName = "controller" KafkaPortNameInternal = "internal" + KafkaPortNameExternal = "external" KafkaTopicNameHealth = "kafka-health" KafkaTopicDeletionThresholdOffset = 1000 + KafkaControllerMaxID = 1000 KafkaRESTPort = 9092 KafkaControllerRESTPort = 9093 KafkaInternalRESTPort = 29092 + KafkaExternalRESTPort = 19092 KafkaCruiseControlRESTPort = 9090 KafkaCruiseControlListenerPort = 9094 KafkaCCDefaultInNetwork = 500000 diff --git a/apis/kubedb/v1alpha2/kafka_types.go b/apis/kubedb/v1alpha2/kafka_types.go index 422d4c1cd3..d36890c3d3 100644 --- a/apis/kubedb/v1alpha2/kafka_types.go +++ b/apis/kubedb/v1alpha2/kafka_types.go @@ -219,6 +219,7 @@ const ( KafkaListenerBroker KafkaListenerType = "BROKER" KafkaListenerController KafkaListenerType = "CONTROLLER" KafkaListenerInternal KafkaListenerType = "INTERNAL" + KafkaListenerExternal KafkaListenerType = "EXTERNAL" KafkaListenerCC KafkaListenerType = "CC" ) diff --git a/apis/kubedb/v1alpha2/pgbouncer_types.go b/apis/kubedb/v1alpha2/pgbouncer_types.go index 0f4ae9222f..030076b62d 100644 --- a/apis/kubedb/v1alpha2/pgbouncer_types.go +++ b/apis/kubedb/v1alpha2/pgbouncer_types.go @@ -128,7 +128,7 @@ type Databases struct { type ConnectionPoolConfig struct { // Port is the port number on which PgBouncer listens to clients. Default: 5432. - // +kubebuilder:default=54342 + // +kubebuilder:default=5432 // +optional Port *int32 `json:"port,omitempty"` // PoolMode is the pooling mechanism type. Default: session. 
diff --git a/apis/ops/v1alpha1/constant.go b/apis/ops/v1alpha1/constant.go index 3c9a7aa13c..da98036729 100644 --- a/apis/ops/v1alpha1/constant.go +++ b/apis/ops/v1alpha1/constant.go @@ -161,6 +161,20 @@ const ( VolumeExpansionCoordinatingNode = "VolumeExpansionCoordinatingNode" ) +// Kafka Constants +const ( + ScaleUpBroker = "ScaleUpBroker" + ScaleUpController = "ScaleUpController" + ScaleUpCombined = "ScaleUpCombined" + ScaleDownBroker = "ScaleDownBroker" + ScaleDownController = "ScaleDownController" + ScaleDownCombined = "ScaleDownCombined" + + UpdateBrokerNodePVCs = "UpdateBrokerNodePVCs" + UpdateControllerNodePVCs = "UpdateControllerNodePVCs" + UpdateCombinedNodePVCs = "UpdateCombinedNodePVCs" +) + // MongoDB Constants const ( StartingBalancer = "StartingBalancer" diff --git a/apis/ops/v1alpha1/kafka_ops_helpers.go b/apis/ops/v1alpha1/kafka_ops_helpers.go new file mode 100644 index 0000000000..92c3ee2115 --- /dev/null +++ b/apis/ops/v1alpha1/kafka_ops_helpers.go @@ -0,0 +1,80 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + + "kubedb.dev/apimachinery/apis" + "kubedb.dev/apimachinery/apis/ops" + "kubedb.dev/apimachinery/crds" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "kmodules.xyz/client-go/apiextensions" +) + +func (_ *KafkaOpsRequest) CustomResourceDefinition() *apiextensions.CustomResourceDefinition { + return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourcePluralKafkaOpsRequest)) +} + +var _ apis.ResourceInfo = &KafkaOpsRequest{} + +func (k *KafkaOpsRequest) ResourceFQN() string { + return fmt.Sprintf("%s.%s", ResourcePluralKafkaOpsRequest, ops.GroupName) +} + +func (k *KafkaOpsRequest) ResourceShortCode() string { + return ResourceCodeKafkaOpsRequest +} + +func (k *KafkaOpsRequest) ResourceKind() string { + return ResourceKindKafkaOpsRequest +} + +func (k *KafkaOpsRequest) ResourceSingular() string { + return ResourceSingularKafkaOpsRequest +} + +func (k *KafkaOpsRequest) ResourcePlural() string { + return ResourcePluralKafkaOpsRequest +} + +var _ Accessor = &KafkaOpsRequest{} + +func (k *KafkaOpsRequest) GetRequestType() any { + return k.Spec.Type +} + +func (k *KafkaOpsRequest) GetObjectMeta() metav1.ObjectMeta { + return k.ObjectMeta +} + +func (k *KafkaOpsRequest) GetUpdateVersionSpec() *KafkaUpdateVersionSpec { + return k.Spec.UpdateVersion +} + +func (k *KafkaOpsRequest) GetDBRefName() string { + return k.Spec.DatabaseRef.Name +} + +func (k *KafkaOpsRequest) GetStatus() OpsRequestStatus { + return k.Status +} + +func (k *KafkaOpsRequest) SetStatus(s OpsRequestStatus) { + k.Status = s +} diff --git a/apis/ops/v1alpha1/kafka_ops_types.go b/apis/ops/v1alpha1/kafka_ops_types.go new file mode 100644 index 0000000000..4ff026b474 --- /dev/null +++ b/apis/ops/v1alpha1/kafka_ops_types.go @@ -0,0 +1,173 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//go:generate go-enum --mustparse --names --values +package v1alpha1 + +import ( + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ResourceCodeKafkaOpsRequest = "kfops" + ResourceKindKafkaOpsRequest = "KafkaOpsRequest" + ResourceSingularKafkaOpsRequest = "kafkaopsrequest" + ResourcePluralKafkaOpsRequest = "kafkaopsrequests" +) + +// kafkaDBOpsRequest defines a Kafka DBA operation. + +// +genclient +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=kafkaopsrequests,singular=kafkaopsrequest,shortName=kfops,categories={datastore,kubedb,appscode} +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +type KafkaOpsRequest struct { + metav1.TypeMeta `json:",inline,omitempty"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec KafkaOpsRequestSpec `json:"spec,omitempty"` + Status OpsRequestStatus `json:"status,omitempty"` +} + +// KafkaOpsRequestSpec is the spec for KafkaOpsRequest +type KafkaOpsRequestSpec struct { + // Specifies the Kafka reference + DatabaseRef core.LocalObjectReference `json:"databaseRef"` + // Specifies the ops request type: UpdateVersion, HorizontalScaling, VerticalScaling etc. 
+ Type KafkaOpsRequestType `json:"type"` + // Specifies information necessary for upgrading Kafka + UpdateVersion *KafkaUpdateVersionSpec `json:"updateVersion,omitempty"` + // Specifies information necessary for horizontal scaling + HorizontalScaling *KafkaHorizontalScalingSpec `json:"horizontalScaling,omitempty"` + // Specifies information necessary for vertical scaling + VerticalScaling *KafkaVerticalScalingSpec `json:"verticalScaling,omitempty"` + // Specifies information necessary for volume expansion + VolumeExpansion *KafkaVolumeExpansionSpec `json:"volumeExpansion,omitempty"` + // Specifies information necessary for custom configuration of Kafka + Configuration *KafkaCustomConfigurationSpec `json:"configuration,omitempty"` + // Specifies information necessary for configuring TLS + TLS *TLSSpec `json:"tls,omitempty"` + // Specifies information necessary for restarting database + Restart *RestartSpec `json:"restart,omitempty"` + // Timeout for each step of the ops request in second. If a step doesn't finish within the specified timeout, the ops request will result in failure. + Timeout *metav1.Duration `json:"timeout,omitempty"` + // ApplyOption is to control the execution of OpsRequest depending on the database state. + // +kubebuilder:default="IfReady" + Apply ApplyOption `json:"apply,omitempty"` +} + +// +kubebuilder:validation:Enum=UpdateVersion;HorizontalScaling;VerticalScaling;VolumeExpansion;Restart;Reconfigure;ReconfigureTLS +// ENUM(UpdateVersion, HorizontalScaling, VerticalScaling, VolumeExpansion, Restart, Reconfigure, ReconfigureTLS) +type KafkaOpsRequestType string + +// KafkaReplicaReadinessCriteria is the criteria for checking readiness of a Kafka pod +// after updating, horizontal scaling etc. 
+type KafkaReplicaReadinessCriteria struct{} + +// KafkaUpdateVersionSpec contains the update version information of a kafka cluster +type KafkaUpdateVersionSpec struct { + // Specifies the target version name from catalog + TargetVersion string `json:"targetVersion,omitempty"` +} + +// KafkaHorizontalScalingSpec contains the horizontal scaling information of a Kafka cluster +type KafkaHorizontalScalingSpec struct { + // Number of combined (i.e. broker, controller) node + Node *int32 `json:"node,omitempty"` + // Node topology specification + Topology *KafkaHorizontalScalingTopologySpec `json:"topology,omitempty"` +} + +// KafkaHorizontalScalingTopologySpec contains the horizontal scaling information in cluster topology mode +type KafkaHorizontalScalingTopologySpec struct { + // Number of broker nodes + Broker *int32 `json:"broker,omitempty"` + // Number of controller nodes + Controller *int32 `json:"controller,omitempty"` +} + +// KafkaVerticalScalingSpec contains the vertical scaling information of a Kafka cluster +type KafkaVerticalScalingSpec struct { + // Resource spec for combined nodes + Node *core.ResourceRequirements `json:"node,omitempty"` + // Specifies the resource spec for cluster in topology mode + Topology *KafkaVerticalScalingTopologySpec `json:"topology,omitempty"` +} + +// KafkaVerticalScalingTopologySpec contains the vertical scaling information in cluster topology mode +type KafkaVerticalScalingTopologySpec struct { + // Resource spec for broker + Broker *core.ResourceRequirements `json:"broker,omitempty"` + // Resource spec for controller + Controller *core.ResourceRequirements `json:"controller,omitempty"` +} + +// KafkaVolumeExpansionSpec is the spec for Kafka volume expansion +type KafkaVolumeExpansionSpec struct { + // +kubebuilder:default="Online" + Mode *VolumeExpansionMode `json:"mode,omitempty"` + // volume specification for combined nodes + Node *resource.Quantity `json:"node,omitempty"` + // volume specification for kafka topology + 
Topology *KafkaVolumeExpansionTopologySpec `json:"topology,omitempty"`
+}
+
+type KafkaVolumeExpansionTopologySpec struct {
+	// volume specification for broker
+	Broker *resource.Quantity `json:"broker,omitempty"`
+	// volume specification for controller
+	Controller *resource.Quantity `json:"controller,omitempty"`
+}
+
+// KafkaCustomConfigurationSpec is the spec for Reconfiguring the Kafka Settings
+type KafkaCustomConfigurationSpec struct {
+	// ConfigSecret is an optional field to provide custom configuration file for database.
+	// +optional
+	ConfigSecret *core.LocalObjectReference `json:"configSecret,omitempty"`
+	// ApplyConfig is an optional field to provide Kafka configuration.
+	// Provided configuration will be applied to config files stored in ConfigSecret.
+	// If the ConfigSecret is missing, the operator will create a new k8s secret by the
+	// following naming convention: {db-name}-user-config .
+	// Expected input format:
+	//	applyConfig:
+	//		file-name.properties: |
+	//			key=value
+	//		server.properties: |
+	//			log.retention.ms=10000
+	// +optional
+	ApplyConfig map[string]string `json:"applyConfig,omitempty"`
+	// If set to "true", the user provided configuration will be removed.
+	// The Kafka cluster will start with default configuration that is generated by the operator.
+ // +optional + RemoveCustomConfig bool `json:"removeCustomConfig,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaOpsRequestList is a list of KafkaOpsRequests +type KafkaOpsRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + // Items is a list of KafkaOpsRequest CRD objects + Items []KafkaOpsRequest `json:"items,omitempty"` +} diff --git a/apis/ops/v1alpha1/kafka_ops_types_enum.go b/apis/ops/v1alpha1/kafka_ops_types_enum.go new file mode 100644 index 0000000000..7b38b6be4d --- /dev/null +++ b/apis/ops/v1alpha1/kafka_ops_types_enum.go @@ -0,0 +1,100 @@ +// Code generated by go-enum DO NOT EDIT. +// Version: +// Revision: +// Build Date: +// Built By: + +package v1alpha1 + +import ( + "fmt" + "strings" +) + +const ( + // KafkaOpsRequestTypeUpdateVersion is a KafkaOpsRequestType of type UpdateVersion. + KafkaOpsRequestTypeUpdateVersion KafkaOpsRequestType = "UpdateVersion" + // KafkaOpsRequestTypeHorizontalScaling is a KafkaOpsRequestType of type HorizontalScaling. + KafkaOpsRequestTypeHorizontalScaling KafkaOpsRequestType = "HorizontalScaling" + // KafkaOpsRequestTypeVerticalScaling is a KafkaOpsRequestType of type VerticalScaling. + KafkaOpsRequestTypeVerticalScaling KafkaOpsRequestType = "VerticalScaling" + // KafkaOpsRequestTypeVolumeExpansion is a KafkaOpsRequestType of type VolumeExpansion. + KafkaOpsRequestTypeVolumeExpansion KafkaOpsRequestType = "VolumeExpansion" + // KafkaOpsRequestTypeRestart is a KafkaOpsRequestType of type Restart. + KafkaOpsRequestTypeRestart KafkaOpsRequestType = "Restart" + // KafkaOpsRequestTypeReconfigure is a KafkaOpsRequestType of type Reconfigure. + KafkaOpsRequestTypeReconfigure KafkaOpsRequestType = "Reconfigure" + // KafkaOpsRequestTypeReconfigureTLS is a KafkaOpsRequestType of type ReconfigureTLS. 
+ KafkaOpsRequestTypeReconfigureTLS KafkaOpsRequestType = "ReconfigureTLS" +) + +var ErrInvalidKafkaOpsRequestType = fmt.Errorf("not a valid KafkaOpsRequestType, try [%s]", strings.Join(_KafkaOpsRequestTypeNames, ", ")) + +var _KafkaOpsRequestTypeNames = []string{ + string(KafkaOpsRequestTypeUpdateVersion), + string(KafkaOpsRequestTypeHorizontalScaling), + string(KafkaOpsRequestTypeVerticalScaling), + string(KafkaOpsRequestTypeVolumeExpansion), + string(KafkaOpsRequestTypeRestart), + string(KafkaOpsRequestTypeReconfigure), + string(KafkaOpsRequestTypeReconfigureTLS), +} + +// KafkaOpsRequestTypeNames returns a list of possible string values of KafkaOpsRequestType. +func KafkaOpsRequestTypeNames() []string { + tmp := make([]string, len(_KafkaOpsRequestTypeNames)) + copy(tmp, _KafkaOpsRequestTypeNames) + return tmp +} + +// KafkaOpsRequestTypeValues returns a list of the values for KafkaOpsRequestType +func KafkaOpsRequestTypeValues() []KafkaOpsRequestType { + return []KafkaOpsRequestType{ + KafkaOpsRequestTypeUpdateVersion, + KafkaOpsRequestTypeHorizontalScaling, + KafkaOpsRequestTypeVerticalScaling, + KafkaOpsRequestTypeVolumeExpansion, + KafkaOpsRequestTypeRestart, + KafkaOpsRequestTypeReconfigure, + KafkaOpsRequestTypeReconfigureTLS, + } +} + +// String implements the Stringer interface. 
+func (x KafkaOpsRequestType) String() string { + return string(x) +} + +// IsValid provides a quick way to determine if the typed value is +// part of the allowed enumerated values +func (x KafkaOpsRequestType) IsValid() bool { + _, err := ParseKafkaOpsRequestType(string(x)) + return err == nil +} + +var _KafkaOpsRequestTypeValue = map[string]KafkaOpsRequestType{ + "UpdateVersion": KafkaOpsRequestTypeUpdateVersion, + "HorizontalScaling": KafkaOpsRequestTypeHorizontalScaling, + "VerticalScaling": KafkaOpsRequestTypeVerticalScaling, + "VolumeExpansion": KafkaOpsRequestTypeVolumeExpansion, + "Restart": KafkaOpsRequestTypeRestart, + "Reconfigure": KafkaOpsRequestTypeReconfigure, + "ReconfigureTLS": KafkaOpsRequestTypeReconfigureTLS, +} + +// ParseKafkaOpsRequestType attempts to convert a string to a KafkaOpsRequestType. +func ParseKafkaOpsRequestType(name string) (KafkaOpsRequestType, error) { + if x, ok := _KafkaOpsRequestTypeValue[name]; ok { + return x, nil + } + return KafkaOpsRequestType(""), fmt.Errorf("%s is %w", name, ErrInvalidKafkaOpsRequestType) +} + +// MustParseKafkaOpsRequestType converts a string to a KafkaOpsRequestType, and panics if is not valid. 
+func MustParseKafkaOpsRequestType(name string) KafkaOpsRequestType { + val, err := ParseKafkaOpsRequestType(name) + if err != nil { + panic(err) + } + return val +} diff --git a/apis/ops/v1alpha1/openapi_generated.go b/apis/ops/v1alpha1/openapi_generated.go index 36e68ff0f9..be570e81f2 100644 --- a/apis/ops/v1alpha1/openapi_generated.go +++ b/apis/ops/v1alpha1/openapi_generated.go @@ -452,6 +452,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kubedb.dev/apimachinery/apis/ops/v1alpha1.EtcdVerticalScalingSpec": schema_apimachinery_apis_ops_v1alpha1_EtcdVerticalScalingSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.EtcdVolumeExpansionSpec": schema_apimachinery_apis_ops_v1alpha1_EtcdVolumeExpansionSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.HiddenNode": schema_apimachinery_apis_ops_v1alpha1_HiddenNode(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaCustomConfigurationSpec": schema_apimachinery_apis_ops_v1alpha1_KafkaCustomConfigurationSpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaHorizontalScalingSpec": schema_apimachinery_apis_ops_v1alpha1_KafkaHorizontalScalingSpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaHorizontalScalingTopologySpec": schema_apimachinery_apis_ops_v1alpha1_KafkaHorizontalScalingTopologySpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequest": schema_apimachinery_apis_ops_v1alpha1_KafkaOpsRequest(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequestList": schema_apimachinery_apis_ops_v1alpha1_KafkaOpsRequestList(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequestSpec": schema_apimachinery_apis_ops_v1alpha1_KafkaOpsRequestSpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaReplicaReadinessCriteria": schema_apimachinery_apis_ops_v1alpha1_KafkaReplicaReadinessCriteria(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaUpdateVersionSpec": schema_apimachinery_apis_ops_v1alpha1_KafkaUpdateVersionSpec(ref), 
+ "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVerticalScalingSpec": schema_apimachinery_apis_ops_v1alpha1_KafkaVerticalScalingSpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVerticalScalingTopologySpec": schema_apimachinery_apis_ops_v1alpha1_KafkaVerticalScalingTopologySpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVolumeExpansionSpec": schema_apimachinery_apis_ops_v1alpha1_KafkaVolumeExpansionSpec(ref), + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVolumeExpansionTopologySpec": schema_apimachinery_apis_ops_v1alpha1_KafkaVolumeExpansionTopologySpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.MariaDBCustomConfiguration": schema_apimachinery_apis_ops_v1alpha1_MariaDBCustomConfiguration(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.MariaDBCustomConfigurationSpec": schema_apimachinery_apis_ops_v1alpha1_MariaDBCustomConfigurationSpec(ref), "kubedb.dev/apimachinery/apis/ops/v1alpha1.MariaDBHorizontalScalingSpec": schema_apimachinery_apis_ops_v1alpha1_MariaDBHorizontalScalingSpec(ref), @@ -22466,6 +22478,430 @@ func schema_apimachinery_apis_ops_v1alpha1_HiddenNode(ref common.ReferenceCallba } } +func schema_apimachinery_apis_ops_v1alpha1_KafkaCustomConfigurationSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaCustomConfigurationSpec is the spec for Reconfiguring the Kafka Settings", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configSecret": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigSecret is an optional field to provide custom configuration file for database.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "applyConfig": { + SchemaProps: spec.SchemaProps{ + Description: "ApplyConfig is an optional field to provide Kafka configuration. Provided configuration will be applied to config files stored in ConfigSecret. 
If the ConfigSecret is missing, the operator will create a new k8s secret by the following naming convention: {db-name}-user-config . Expected input format:\n\tapplyConfig:\n\t\tfile-name.properties: |\n\t\t\tkey=value\n\t\tserver.properties: |\n\t\t\tlog.retention.ms=10000", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "removeCustomConfig": { + SchemaProps: spec.SchemaProps{ + Description: "If set to \"true\", the user provided configuration will be removed. The Kafka cluster will start will default configuration that is generated by the operator.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaHorizontalScalingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaHorizontalScalingSpec contains the horizontal scaling information of a Kafka cluster", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "node": { + SchemaProps: spec.SchemaProps{ + Description: "Number of combined (i.e. 
broker, controller) node", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "topology": { + SchemaProps: spec.SchemaProps{ + Description: "Node topology specification", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaHorizontalScalingTopologySpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaHorizontalScalingTopologySpec"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaHorizontalScalingTopologySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaHorizontalScalingTopologySpec contains the horizontal scaling information in cluster topology mode", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "broker": { + SchemaProps: spec.SchemaProps{ + Description: "Number of broker nodes", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "controller": { + SchemaProps: spec.SchemaProps{ + Description: "Number of controller nodes", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaOpsRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequestSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.OpsRequestStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequestSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.OpsRequestStatus"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaOpsRequestList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaOpsRequestList is a list of KafkaOpsRequests", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of KafkaOpsRequest CRD objects", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequest"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaOpsRequest"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaOpsRequestSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaOpsRequestSpec is the spec for KafkaOpsRequest", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "databaseRef": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the Kafka reference", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the ops request type: UpdateVersion, HorizontalScaling, VerticalScaling etc.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "updateVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for upgrading Kafka", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaUpdateVersionSpec"), + }, + }, + "horizontalScaling": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for horizontal scaling", + Ref: 
ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaHorizontalScalingSpec"), + }, + }, + "verticalScaling": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for vertical scaling", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVerticalScalingSpec"), + }, + }, + "volumeExpansion": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for volume expansion", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVolumeExpansionSpec"), + }, + }, + "configuration": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for custom configuration of Kafka", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaCustomConfigurationSpec"), + }, + }, + "tls": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for configuring TLS", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.TLSSpec"), + }, + }, + "restart": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies information necessary for restarting database", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.RestartSpec"), + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout for each step of the ops request in second. 
If a step doesn't finish within the specified timeout, the ops request will result in failure.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "apply": { + SchemaProps: spec.SchemaProps{ + Description: "ApplyOption is to control the execution of OpsRequest depending on the database state.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"databaseRef", "type"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaCustomConfigurationSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaHorizontalScalingSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaUpdateVersionSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVerticalScalingSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVolumeExpansionSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.RestartSpec", "kubedb.dev/apimachinery/apis/ops/v1alpha1.TLSSpec"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaReplicaReadinessCriteria(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaReplicaReadinessCriteria is the criteria for checking readiness of a Kafka pod after updating, horizontal scaling etc.", + Type: []string{"object"}, + }, + }, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaUpdateVersionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaUpdateVersionSpec contains the update version information of a kafka cluster", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "targetVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the target version name from catalog", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + 
}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaVerticalScalingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaVerticalScalingSpec contains the vertical scaling information of a Kafka cluster", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "node": { + SchemaProps: spec.SchemaProps{ + Description: "Resource spec for combined nodes", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "topology": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the resource spec for cluster in topology mode", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVerticalScalingTopologySpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ResourceRequirements", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVerticalScalingTopologySpec"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaVerticalScalingTopologySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaVerticalScalingTopologySpec contains the vertical scaling information in cluster topology mode", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "broker": { + SchemaProps: spec.SchemaProps{ + Description: "Resource spec for broker", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "controller": { + SchemaProps: spec.SchemaProps{ + Description: "Resource spec for controller", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ResourceRequirements"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaVolumeExpansionSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + 
Description: "KafkaVolumeExpansionSpec is the spec for Kafka volume expansion", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "mode": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "node": { + SchemaProps: spec.SchemaProps{ + Description: "volume specification for combined nodes", + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + "topology": { + SchemaProps: spec.SchemaProps{ + Description: "volume specification for kafka topology", + Ref: ref("kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVolumeExpansionTopologySpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity", "kubedb.dev/apimachinery/apis/ops/v1alpha1.KafkaVolumeExpansionTopologySpec"}, + } +} + +func schema_apimachinery_apis_ops_v1alpha1_KafkaVolumeExpansionTopologySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "broker": { + SchemaProps: spec.SchemaProps{ + Description: "volume specification for broker", + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + "controller": { + SchemaProps: spec.SchemaProps{ + Description: "volume specification for controller", + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + func schema_apimachinery_apis_ops_v1alpha1_MariaDBCustomConfiguration(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/apis/ops/v1alpha1/register.go b/apis/ops/v1alpha1/register.go index 3f9a8c1149..fd2fe04b50 100644 --- a/apis/ops/v1alpha1/register.go +++ b/apis/ops/v1alpha1/register.go @@ -58,6 +58,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { 
&ElasticsearchOpsRequestList{}, &EtcdOpsRequest{}, &EtcdOpsRequestList{}, + &KafkaOpsRequest{}, + &KafkaOpsRequestList{}, &MemcachedOpsRequest{}, &MemcachedOpsRequestList{}, &MongoDBOpsRequest{}, diff --git a/apis/ops/v1alpha1/zz_generated.deepcopy.go b/apis/ops/v1alpha1/zz_generated.deepcopy.go index 015bab98da..50068e7ece 100644 --- a/apis/ops/v1alpha1/zz_generated.deepcopy.go +++ b/apis/ops/v1alpha1/zz_generated.deepcopy.go @@ -826,6 +826,345 @@ func (in *HiddenNode) DeepCopy() *HiddenNode { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaCustomConfigurationSpec) DeepCopyInto(out *KafkaCustomConfigurationSpec) { + *out = *in + if in.ConfigSecret != nil { + in, out := &in.ConfigSecret, &out.ConfigSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.ApplyConfig != nil { + in, out := &in.ApplyConfig, &out.ApplyConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaCustomConfigurationSpec. +func (in *KafkaCustomConfigurationSpec) DeepCopy() *KafkaCustomConfigurationSpec { + if in == nil { + return nil + } + out := new(KafkaCustomConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaHorizontalScalingSpec) DeepCopyInto(out *KafkaHorizontalScalingSpec) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(int32) + **out = **in + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(KafkaHorizontalScalingTopologySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaHorizontalScalingSpec. 
+func (in *KafkaHorizontalScalingSpec) DeepCopy() *KafkaHorizontalScalingSpec { + if in == nil { + return nil + } + out := new(KafkaHorizontalScalingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaHorizontalScalingTopologySpec) DeepCopyInto(out *KafkaHorizontalScalingTopologySpec) { + *out = *in + if in.Broker != nil { + in, out := &in.Broker, &out.Broker + *out = new(int32) + **out = **in + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaHorizontalScalingTopologySpec. +func (in *KafkaHorizontalScalingTopologySpec) DeepCopy() *KafkaHorizontalScalingTopologySpec { + if in == nil { + return nil + } + out := new(KafkaHorizontalScalingTopologySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaOpsRequest) DeepCopyInto(out *KafkaOpsRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaOpsRequest. +func (in *KafkaOpsRequest) DeepCopy() *KafkaOpsRequest { + if in == nil { + return nil + } + out := new(KafkaOpsRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaOpsRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaOpsRequestList) DeepCopyInto(out *KafkaOpsRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaOpsRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaOpsRequestList. +func (in *KafkaOpsRequestList) DeepCopy() *KafkaOpsRequestList { + if in == nil { + return nil + } + out := new(KafkaOpsRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaOpsRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaOpsRequestSpec) DeepCopyInto(out *KafkaOpsRequestSpec) { + *out = *in + out.DatabaseRef = in.DatabaseRef + if in.UpdateVersion != nil { + in, out := &in.UpdateVersion, &out.UpdateVersion + *out = new(KafkaUpdateVersionSpec) + **out = **in + } + if in.HorizontalScaling != nil { + in, out := &in.HorizontalScaling, &out.HorizontalScaling + *out = new(KafkaHorizontalScalingSpec) + (*in).DeepCopyInto(*out) + } + if in.VerticalScaling != nil { + in, out := &in.VerticalScaling, &out.VerticalScaling + *out = new(KafkaVerticalScalingSpec) + (*in).DeepCopyInto(*out) + } + if in.VolumeExpansion != nil { + in, out := &in.VolumeExpansion, &out.VolumeExpansion + *out = new(KafkaVolumeExpansionSpec) + (*in).DeepCopyInto(*out) + } + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(KafkaCustomConfigurationSpec) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSSpec) + (*in).DeepCopyInto(*out) + } + if 
in.Restart != nil { + in, out := &in.Restart, &out.Restart + *out = new(RestartSpec) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaOpsRequestSpec. +func (in *KafkaOpsRequestSpec) DeepCopy() *KafkaOpsRequestSpec { + if in == nil { + return nil + } + out := new(KafkaOpsRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaReplicaReadinessCriteria) DeepCopyInto(out *KafkaReplicaReadinessCriteria) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaReplicaReadinessCriteria. +func (in *KafkaReplicaReadinessCriteria) DeepCopy() *KafkaReplicaReadinessCriteria { + if in == nil { + return nil + } + out := new(KafkaReplicaReadinessCriteria) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaUpdateVersionSpec) DeepCopyInto(out *KafkaUpdateVersionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaUpdateVersionSpec. +func (in *KafkaUpdateVersionSpec) DeepCopy() *KafkaUpdateVersionSpec { + if in == nil { + return nil + } + out := new(KafkaUpdateVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaVerticalScalingSpec) DeepCopyInto(out *KafkaVerticalScalingSpec) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(KafkaVerticalScalingTopologySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaVerticalScalingSpec. +func (in *KafkaVerticalScalingSpec) DeepCopy() *KafkaVerticalScalingSpec { + if in == nil { + return nil + } + out := new(KafkaVerticalScalingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaVerticalScalingTopologySpec) DeepCopyInto(out *KafkaVerticalScalingTopologySpec) { + *out = *in + if in.Broker != nil { + in, out := &in.Broker, &out.Broker + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaVerticalScalingTopologySpec. +func (in *KafkaVerticalScalingTopologySpec) DeepCopy() *KafkaVerticalScalingTopologySpec { + if in == nil { + return nil + } + out := new(KafkaVerticalScalingTopologySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaVolumeExpansionSpec) DeepCopyInto(out *KafkaVolumeExpansionSpec) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(VolumeExpansionMode) + **out = **in + } + if in.Node != nil { + in, out := &in.Node, &out.Node + x := (*in).DeepCopy() + *out = &x + } + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(KafkaVolumeExpansionTopologySpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaVolumeExpansionSpec. +func (in *KafkaVolumeExpansionSpec) DeepCopy() *KafkaVolumeExpansionSpec { + if in == nil { + return nil + } + out := new(KafkaVolumeExpansionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaVolumeExpansionTopologySpec) DeepCopyInto(out *KafkaVolumeExpansionTopologySpec) { + *out = *in + if in.Broker != nil { + in, out := &in.Broker, &out.Broker + x := (*in).DeepCopy() + *out = &x + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaVolumeExpansionTopologySpec. +func (in *KafkaVolumeExpansionTopologySpec) DeepCopy() *KafkaVolumeExpansionTopologySpec { + if in == nil { + return nil + } + out := new(KafkaVolumeExpansionTopologySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MariaDBCustomConfiguration) DeepCopyInto(out *MariaDBCustomConfiguration) { *out = *in diff --git a/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_kafkaopsrequest.go b/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_kafkaopsrequest.go new file mode 100644 index 0000000000..b10804ef2e --- /dev/null +++ b/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_kafkaopsrequest.go @@ -0,0 +1,143 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "kubedb.dev/apimachinery/apis/ops/v1alpha1" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeKafkaOpsRequests implements KafkaOpsRequestInterface +type FakeKafkaOpsRequests struct { + Fake *FakeOpsV1alpha1 + ns string +} + +var kafkaopsrequestsResource = schema.GroupVersionResource{Group: "ops.kubedb.com", Version: "v1alpha1", Resource: "kafkaopsrequests"} + +var kafkaopsrequestsKind = schema.GroupVersionKind{Group: "ops.kubedb.com", Version: "v1alpha1", Kind: "KafkaOpsRequest"} + +// Get takes name of the kafkaOpsRequest, and returns the corresponding kafkaOpsRequest object, and an error if there is any. 
+func (c *FakeKafkaOpsRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kafkaopsrequestsResource, c.ns, name), &v1alpha1.KafkaOpsRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaOpsRequest), err +} + +// List takes label and field selectors, and returns the list of KafkaOpsRequests that match those selectors. +func (c *FakeKafkaOpsRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KafkaOpsRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kafkaopsrequestsResource, kafkaopsrequestsKind, c.ns, opts), &v1alpha1.KafkaOpsRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.KafkaOpsRequestList{ListMeta: obj.(*v1alpha1.KafkaOpsRequestList).ListMeta} + for _, item := range obj.(*v1alpha1.KafkaOpsRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kafkaOpsRequests. +func (c *FakeKafkaOpsRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kafkaopsrequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a kafkaOpsRequest and creates it. Returns the server's representation of the kafkaOpsRequest, and an error, if there is any. +func (c *FakeKafkaOpsRequests) Create(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.CreateOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(kafkaopsrequestsResource, c.ns, kafkaOpsRequest), &v1alpha1.KafkaOpsRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaOpsRequest), err +} + +// Update takes the representation of a kafkaOpsRequest and updates it. Returns the server's representation of the kafkaOpsRequest, and an error, if there is any. +func (c *FakeKafkaOpsRequests) Update(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.UpdateOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kafkaopsrequestsResource, c.ns, kafkaOpsRequest), &v1alpha1.KafkaOpsRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaOpsRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKafkaOpsRequests) UpdateStatus(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.UpdateOptions) (*v1alpha1.KafkaOpsRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kafkaopsrequestsResource, "status", c.ns, kafkaOpsRequest), &v1alpha1.KafkaOpsRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaOpsRequest), err +} + +// Delete takes name of the kafkaOpsRequest and deletes it. Returns an error if one occurs. +func (c *FakeKafkaOpsRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(kafkaopsrequestsResource, c.ns, name, opts), &v1alpha1.KafkaOpsRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeKafkaOpsRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kafkaopsrequestsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.KafkaOpsRequestList{}) + return err +} + +// Patch applies the patch and returns the patched kafkaOpsRequest. +func (c *FakeKafkaOpsRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KafkaOpsRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kafkaopsrequestsResource, c.ns, name, pt, data, subresources...), &v1alpha1.KafkaOpsRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.KafkaOpsRequest), err +} diff --git a/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_ops_client.go b/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_ops_client.go index c936abdc90..ee546799f2 100644 --- a/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_ops_client.go +++ b/client/clientset/versioned/typed/ops/v1alpha1/fake/fake_ops_client.go @@ -37,6 +37,10 @@ func (c *FakeOpsV1alpha1) EtcdOpsRequests(namespace string) v1alpha1.EtcdOpsRequ return &FakeEtcdOpsRequests{c, namespace} } +func (c *FakeOpsV1alpha1) KafkaOpsRequests(namespace string) v1alpha1.KafkaOpsRequestInterface { + return &FakeKafkaOpsRequests{c, namespace} +} + func (c *FakeOpsV1alpha1) MariaDBOpsRequests(namespace string) v1alpha1.MariaDBOpsRequestInterface { return &FakeMariaDBOpsRequests{c, namespace} } diff --git a/client/clientset/versioned/typed/ops/v1alpha1/generated_expansion.go b/client/clientset/versioned/typed/ops/v1alpha1/generated_expansion.go index 3841183601..26d29cd2cf 100644 --- a/client/clientset/versioned/typed/ops/v1alpha1/generated_expansion.go +++ b/client/clientset/versioned/typed/ops/v1alpha1/generated_expansion.go @@ -22,6 +22,8 @@ type ElasticsearchOpsRequestExpansion 
interface{} type EtcdOpsRequestExpansion interface{} +type KafkaOpsRequestExpansion interface{} + type MariaDBOpsRequestExpansion interface{} type MemcachedOpsRequestExpansion interface{} diff --git a/client/clientset/versioned/typed/ops/v1alpha1/kafkaopsrequest.go b/client/clientset/versioned/typed/ops/v1alpha1/kafkaopsrequest.go new file mode 100644 index 0000000000..d24494a999 --- /dev/null +++ b/client/clientset/versioned/typed/ops/v1alpha1/kafkaopsrequest.go @@ -0,0 +1,196 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "kubedb.dev/apimachinery/apis/ops/v1alpha1" + scheme "kubedb.dev/apimachinery/client/clientset/versioned/scheme" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// KafkaOpsRequestsGetter has a method to return a KafkaOpsRequestInterface. +// A group's client should implement this interface. +type KafkaOpsRequestsGetter interface { + KafkaOpsRequests(namespace string) KafkaOpsRequestInterface +} + +// KafkaOpsRequestInterface has methods to work with KafkaOpsRequest resources. 
+type KafkaOpsRequestInterface interface { + Create(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.CreateOptions) (*v1alpha1.KafkaOpsRequest, error) + Update(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.UpdateOptions) (*v1alpha1.KafkaOpsRequest, error) + UpdateStatus(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.UpdateOptions) (*v1alpha1.KafkaOpsRequest, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.KafkaOpsRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.KafkaOpsRequestList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KafkaOpsRequest, err error) + KafkaOpsRequestExpansion +} + +// kafkaOpsRequests implements KafkaOpsRequestInterface +type kafkaOpsRequests struct { + client rest.Interface + ns string +} + +// newKafkaOpsRequests returns a KafkaOpsRequests +func newKafkaOpsRequests(c *OpsV1alpha1Client, namespace string) *kafkaOpsRequests { + return &kafkaOpsRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kafkaOpsRequest, and returns the corresponding kafkaOpsRequest object, and an error if there is any. +func (c *kafkaOpsRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + result = &v1alpha1.KafkaOpsRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of KafkaOpsRequests that match those selectors. +func (c *kafkaOpsRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.KafkaOpsRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.KafkaOpsRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kafkaOpsRequests. +func (c *kafkaOpsRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kafkaOpsRequest and creates it. Returns the server's representation of the kafkaOpsRequest, and an error, if there is any. +func (c *kafkaOpsRequests) Create(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.CreateOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + result = &v1alpha1.KafkaOpsRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaOpsRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kafkaOpsRequest and updates it. Returns the server's representation of the kafkaOpsRequest, and an error, if there is any. 
+func (c *kafkaOpsRequests) Update(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.UpdateOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + result = &v1alpha1.KafkaOpsRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + Name(kafkaOpsRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaOpsRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kafkaOpsRequests) UpdateStatus(ctx context.Context, kafkaOpsRequest *v1alpha1.KafkaOpsRequest, opts v1.UpdateOptions) (result *v1alpha1.KafkaOpsRequest, err error) { + result = &v1alpha1.KafkaOpsRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + Name(kafkaOpsRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaOpsRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kafkaOpsRequest and deletes it. Returns an error if one occurs. +func (c *kafkaOpsRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kafkaOpsRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkaopsrequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kafkaOpsRequest. 
+func (c *kafkaOpsRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.KafkaOpsRequest, err error) { + result = &v1alpha1.KafkaOpsRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kafkaopsrequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/client/clientset/versioned/typed/ops/v1alpha1/ops_client.go b/client/clientset/versioned/typed/ops/v1alpha1/ops_client.go index 3a64f635ed..89ae094f67 100644 --- a/client/clientset/versioned/typed/ops/v1alpha1/ops_client.go +++ b/client/clientset/versioned/typed/ops/v1alpha1/ops_client.go @@ -31,6 +31,7 @@ type OpsV1alpha1Interface interface { RESTClient() rest.Interface ElasticsearchOpsRequestsGetter EtcdOpsRequestsGetter + KafkaOpsRequestsGetter MariaDBOpsRequestsGetter MemcachedOpsRequestsGetter MongoDBOpsRequestsGetter @@ -56,6 +57,10 @@ func (c *OpsV1alpha1Client) EtcdOpsRequests(namespace string) EtcdOpsRequestInte return newEtcdOpsRequests(c, namespace) } +func (c *OpsV1alpha1Client) KafkaOpsRequests(namespace string) KafkaOpsRequestInterface { + return newKafkaOpsRequests(c, namespace) +} + func (c *OpsV1alpha1Client) MariaDBOpsRequests(namespace string) MariaDBOpsRequestInterface { return newMariaDBOpsRequests(c, namespace) } diff --git a/client/informers/externalversions/generic.go b/client/informers/externalversions/generic.go index 8652640bfa..4024964aba 100644 --- a/client/informers/externalversions/generic.go +++ b/client/informers/externalversions/generic.go @@ -148,6 +148,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Ops().V1alpha1().ElasticsearchOpsRequests().Informer()}, nil case opsv1alpha1.SchemeGroupVersion.WithResource("etcdopsrequests"): return 
&genericInformer{resource: resource.GroupResource(), informer: f.Ops().V1alpha1().EtcdOpsRequests().Informer()}, nil + case opsv1alpha1.SchemeGroupVersion.WithResource("kafkaopsrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Ops().V1alpha1().KafkaOpsRequests().Informer()}, nil case opsv1alpha1.SchemeGroupVersion.WithResource("mariadbopsrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ops().V1alpha1().MariaDBOpsRequests().Informer()}, nil case opsv1alpha1.SchemeGroupVersion.WithResource("memcachedopsrequests"): diff --git a/client/informers/externalversions/ops/v1alpha1/interface.go b/client/informers/externalversions/ops/v1alpha1/interface.go index 2d7606b2e0..c1d7a14578 100644 --- a/client/informers/externalversions/ops/v1alpha1/interface.go +++ b/client/informers/externalversions/ops/v1alpha1/interface.go @@ -28,6 +28,8 @@ type Interface interface { ElasticsearchOpsRequests() ElasticsearchOpsRequestInformer // EtcdOpsRequests returns a EtcdOpsRequestInformer. EtcdOpsRequests() EtcdOpsRequestInformer + // KafkaOpsRequests returns a KafkaOpsRequestInformer. + KafkaOpsRequests() KafkaOpsRequestInformer // MariaDBOpsRequests returns a MariaDBOpsRequestInformer. MariaDBOpsRequests() MariaDBOpsRequestInformer // MemcachedOpsRequests returns a MemcachedOpsRequestInformer. @@ -71,6 +73,11 @@ func (v *version) EtcdOpsRequests() EtcdOpsRequestInformer { return &etcdOpsRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// KafkaOpsRequests returns a KafkaOpsRequestInformer. +func (v *version) KafkaOpsRequests() KafkaOpsRequestInformer { + return &kafkaOpsRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // MariaDBOpsRequests returns a MariaDBOpsRequestInformer. 
func (v *version) MariaDBOpsRequests() MariaDBOpsRequestInformer { return &mariaDBOpsRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/client/informers/externalversions/ops/v1alpha1/kafkaopsrequest.go b/client/informers/externalversions/ops/v1alpha1/kafkaopsrequest.go new file mode 100644 index 0000000000..b173c12ffc --- /dev/null +++ b/client/informers/externalversions/ops/v1alpha1/kafkaopsrequest.go @@ -0,0 +1,91 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + opsv1alpha1 "kubedb.dev/apimachinery/apis/ops/v1alpha1" + versioned "kubedb.dev/apimachinery/client/clientset/versioned" + internalinterfaces "kubedb.dev/apimachinery/client/informers/externalversions/internalinterfaces" + v1alpha1 "kubedb.dev/apimachinery/client/listers/ops/v1alpha1" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// KafkaOpsRequestInformer provides access to a shared informer and lister for +// KafkaOpsRequests. 
+type KafkaOpsRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.KafkaOpsRequestLister +} + +type kafkaOpsRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewKafkaOpsRequestInformer constructs a new informer for KafkaOpsRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewKafkaOpsRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredKafkaOpsRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredKafkaOpsRequestInformer constructs a new informer for KafkaOpsRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredKafkaOpsRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OpsV1alpha1().KafkaOpsRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OpsV1alpha1().KafkaOpsRequests(namespace).Watch(context.TODO(), options) + }, + }, + &opsv1alpha1.KafkaOpsRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *kafkaOpsRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredKafkaOpsRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *kafkaOpsRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&opsv1alpha1.KafkaOpsRequest{}, f.defaultInformer) +} + +func (f *kafkaOpsRequestInformer) Lister() v1alpha1.KafkaOpsRequestLister { + return v1alpha1.NewKafkaOpsRequestLister(f.Informer().GetIndexer()) +} diff --git a/client/listers/ops/v1alpha1/expansion_generated.go b/client/listers/ops/v1alpha1/expansion_generated.go index 6135fd5944..5f77e7a9a1 100644 --- a/client/listers/ops/v1alpha1/expansion_generated.go +++ b/client/listers/ops/v1alpha1/expansion_generated.go @@ -34,6 +34,14 @@ type EtcdOpsRequestListerExpansion interface{} // EtcdOpsRequestNamespaceLister. type EtcdOpsRequestNamespaceListerExpansion interface{} +// KafkaOpsRequestListerExpansion allows custom methods to be added to +// KafkaOpsRequestLister. 
+type KafkaOpsRequestListerExpansion interface{} + +// KafkaOpsRequestNamespaceListerExpansion allows custom methods to be added to +// KafkaOpsRequestNamespaceLister. +type KafkaOpsRequestNamespaceListerExpansion interface{} + // MariaDBOpsRequestListerExpansion allows custom methods to be added to // MariaDBOpsRequestLister. type MariaDBOpsRequestListerExpansion interface{} diff --git a/client/listers/ops/v1alpha1/kafkaopsrequest.go b/client/listers/ops/v1alpha1/kafkaopsrequest.go new file mode 100644 index 0000000000..d54229fd05 --- /dev/null +++ b/client/listers/ops/v1alpha1/kafkaopsrequest.go @@ -0,0 +1,100 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "kubedb.dev/apimachinery/apis/ops/v1alpha1" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// KafkaOpsRequestLister helps list KafkaOpsRequests. +// All objects returned here must be treated as read-only. +type KafkaOpsRequestLister interface { + // List lists all KafkaOpsRequests in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KafkaOpsRequest, err error) + // KafkaOpsRequests returns an object that can list and get KafkaOpsRequests. 
+ KafkaOpsRequests(namespace string) KafkaOpsRequestNamespaceLister + KafkaOpsRequestListerExpansion +} + +// kafkaOpsRequestLister implements the KafkaOpsRequestLister interface. +type kafkaOpsRequestLister struct { + indexer cache.Indexer +} + +// NewKafkaOpsRequestLister returns a new KafkaOpsRequestLister. +func NewKafkaOpsRequestLister(indexer cache.Indexer) KafkaOpsRequestLister { + return &kafkaOpsRequestLister{indexer: indexer} +} + +// List lists all KafkaOpsRequests in the indexer. +func (s *kafkaOpsRequestLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaOpsRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaOpsRequest)) + }) + return ret, err +} + +// KafkaOpsRequests returns an object that can list and get KafkaOpsRequests. +func (s *kafkaOpsRequestLister) KafkaOpsRequests(namespace string) KafkaOpsRequestNamespaceLister { + return kafkaOpsRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KafkaOpsRequestNamespaceLister helps list and get KafkaOpsRequests. +// All objects returned here must be treated as read-only. +type KafkaOpsRequestNamespaceLister interface { + // List lists all KafkaOpsRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.KafkaOpsRequest, err error) + // Get retrieves the KafkaOpsRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.KafkaOpsRequest, error) + KafkaOpsRequestNamespaceListerExpansion +} + +// kafkaOpsRequestNamespaceLister implements the KafkaOpsRequestNamespaceLister +// interface. +type kafkaOpsRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KafkaOpsRequests in the indexer for a given namespace. 
+func (s kafkaOpsRequestNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.KafkaOpsRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.KafkaOpsRequest)) + }) + return ret, err +} + +// Get retrieves the KafkaOpsRequest from the indexer for a given namespace and name. +func (s kafkaOpsRequestNamespaceLister) Get(name string) (*v1alpha1.KafkaOpsRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("kafkaopsrequest"), name) + } + return obj.(*v1alpha1.KafkaOpsRequest), nil +} diff --git a/crds/kubedb.com_pgbouncers.yaml b/crds/kubedb.com_pgbouncers.yaml index c9778c55c0..c231d74acf 100644 --- a/crds/kubedb.com_pgbouncers.yaml +++ b/crds/kubedb.com_pgbouncers.yaml @@ -103,7 +103,7 @@ spec: default: session type: string port: - default: 54342 + default: 5432 format: int32 type: integer reservePoolSize: diff --git a/crds/ops.kubedb.com_kafkaopsrequests.yaml b/crds/ops.kubedb.com_kafkaopsrequests.yaml new file mode 100644 index 0000000000..663e764a78 --- /dev/null +++ b/crds/ops.kubedb.com_kafkaopsrequests.yaml @@ -0,0 +1,354 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kubedb + name: kafkaopsrequests.ops.kubedb.com +spec: + group: ops.kubedb.com + names: + categories: + - datastore + - kubedb + - appscode + kind: KafkaOpsRequest + listKind: KafkaOpsRequestList + plural: kafkaopsrequests + shortNames: + - kfops + singular: kafkaopsrequest + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.type + name: Type + type: string + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + 
apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + apply: + default: IfReady + enum: + - IfReady + - Always + type: string + configuration: + properties: + applyConfig: + additionalProperties: + type: string + type: object + configSecret: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + removeCustomConfig: + type: boolean + type: object + databaseRef: + properties: + name: + type: string + type: object + x-kubernetes-map-type: atomic + horizontalScaling: + properties: + node: + format: int32 + type: integer + topology: + properties: + broker: + format: int32 + type: integer + controller: + format: int32 + type: integer + type: object + type: object + restart: + type: object + timeout: + type: string + tls: + properties: + certificates: + items: + properties: + alias: + type: string + dnsNames: + items: + type: string + type: array + duration: + type: string + emailAddresses: + items: + type: string + type: array + ipAddresses: + items: + type: string + type: array + issuerRef: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + privateKey: + properties: + encoding: + enum: + - PKCS1 + - PKCS8 + type: string + type: object + renewBefore: + type: string + secretName: + type: string + subject: + properties: + countries: + items: + type: string + type: array + localities: + items: + type: string + type: array + organizationalUnits: + items: + type: string + type: array + organizations: + items: + type: string + type: array + postalCodes: + items: + type: string + type: array + provinces: + items: + type: string + type: array + serialNumber: + type: string + streetAddresses: + items: + type: string + type: array + type: object + uris: + items: + type: string + type: array + required: + - alias + type: object + type: array + issuerRef: + properties: + apiGroup: + type: 
string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + remove: + type: boolean + rotateCertificates: + type: boolean + type: object + type: + enum: + - UpdateVersion + - HorizontalScaling + - VerticalScaling + - VolumeExpansion + - Restart + - Reconfigure + - ReconfigureTLS + type: string + updateVersion: + properties: + targetVersion: + type: string + type: object + verticalScaling: + properties: + node: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + topology: + properties: + broker: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + controller: + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + type: object + type: object + volumeExpansion: + properties: + mode: + default: Online + enum: + - Offline + - Online + type: string + node: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + topology: + properties: + broker: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + controller: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + required: + - databaseRef + - type + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + observedGeneration: + format: int64 + type: integer + reason: + type: string + severity: + type: string + status: + type: string + type: + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + observedGeneration: + format: int64 + type: integer + phase: + enum: + - Pending + - Progressing + - Successful + - WaitingForApproval + - Failed + - Approved + - Denied + - Skipped + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {}