diff --git a/.github/scripts/deploy-fleet.sh b/.github/scripts/deploy-fleet.sh index 00a8a24723..7aa7e297ce 100755 --- a/.github/scripts/deploy-fleet.sh +++ b/.github/scripts/deploy-fleet.sh @@ -66,6 +66,8 @@ eventually helm upgrade --install fleet charts/fleet \ $shards_settings \ --set-string extraEnv[0].name=EXPERIMENTAL_OCI_STORAGE \ --set-string extraEnv[0].value=true \ + --set-string extraEnv[1].name=EXPERIMENTAL_HELM_OPS \ + --set-string extraEnv[1].value=true \ --set garbageCollectionInterval=1s \ --set debug=true --set debugLevel=1 diff --git a/.github/scripts/label-downstream-cluster.sh b/.github/scripts/label-downstream-cluster.sh index 20549a78cd..2c323e840a 100755 --- a/.github/scripts/label-downstream-cluster.sh +++ b/.github/scripts/label-downstream-cluster.sh @@ -4,6 +4,12 @@ set -euxo pipefail ns=${FLEET_E2E_NS_DOWNSTREAM-fleet-default} -{ grep -q -m 1 -e "1/1"; kill $!; } < <(kubectl get clusters.fleet.cattle.io -n "$ns" -w) -name=$(kubectl get clusters.fleet.cattle.io -o=jsonpath='{.items[0].metadata.name}' -n "$ns") -kubectl patch clusters.fleet.cattle.io -n "$ns" "$name" --type=json -p '[{"op": "add", "path": "/metadata/labels/env", "value": "test" }]' +# Wait for clusters to become "ready" by waiting for bundles to become ready. +num_clusters=$(k3d cluster list -o json | jq -r '.[].name | select( . 
| contains("downstream") )' | wc -l) +while [[ $(kubectl get clusters.fleet.cattle.io -n "$ns" | grep '1/1' -c) -ne $num_clusters ]]; do + sleep 1 +done + +for cluster in $(kubectl get clusters.fleet.cattle.io -n "$ns" -o=jsonpath='{.items[*].metadata.name}'); do + kubectl patch clusters.fleet.cattle.io -n "$ns" "$cluster" --type=json -p '[{"op": "add", "path": "/metadata/labels/env", "value": "test" }]' +done diff --git a/.github/workflows/e2e-ci.yml b/.github/workflows/e2e-ci.yml index abeec720a1..809028fb5e 100644 --- a/.github/workflows/e2e-ci.yml +++ b/.github/workflows/e2e-ci.yml @@ -98,8 +98,8 @@ jobs: FLEET_E2E_NS: fleet-local run: | ginkgo --github-output --label-filter='sharding' e2e/single-cluster - ginkgo --github-output e2e/metrics - SHARD=shard1 ginkgo --github-output e2e/metrics + ginkgo --github-output --label-filter='!oci-registry' e2e/metrics + SHARD=shard1 ginkgo --github-output --label-filter='!oci-registry' e2e/metrics - name: Create Zot certificates for OCI tests if: ${{ matrix.test_type.name == 'infra-setup' }} @@ -134,6 +134,7 @@ jobs: # Run tests requiring an OCI registry e2e/testenv/infra/infra setup --oci-registry=true ginkgo --github-output --label-filter='oci-registry' e2e/single-cluster + ginkgo --github-output --label-filter='oci-registry' e2e/metrics e2e/testenv/infra/infra teardown - diff --git a/charts/fleet-agent/templates/deployment.yaml b/charts/fleet-agent/templates/deployment.yaml index 8ca21dc724..d689cd9f15 100644 --- a/charts/fleet-agent/templates/deployment.yaml +++ b/charts/fleet-agent/templates/deployment.yaml @@ -70,6 +70,8 @@ spec: - ALL {{- end }} volumeMounts: + - mountPath: /tmp + name: tmp - mountPath: /.kube name: kube - env: @@ -97,6 +99,8 @@ spec: - ALL {{- end }} volumes: + - name: tmp + emptyDir: {} - name: kube emptyDir: {} serviceAccountName: fleet-agent diff --git a/charts/fleet-agent/templates/validate.yaml b/charts/fleet-agent/templates/validate.yaml index d53ff1c508..5333818183 100644 --- 
a/charts/fleet-agent/templates/validate.yaml +++ b/charts/fleet-agent/templates/validate.yaml @@ -1,9 +1,9 @@ {{if ne .Release.Namespace .Values.internal.systemNamespace }} -{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.systemNamespace) }} +{{ fail (printf "This chart must be installed in the %s namespace" .Values.internal.systemNamespace) }} {{end}} {{if ne .Release.Name .Values.internal.managedReleaseName }} -{{ fail (printf "This chart must be installed in the namespace %s as the release name fleet-agent" .Values.internal.managedReleaseName) }} +{{ fail (printf "This chart must be installed with release name %s" .Values.internal.managedReleaseName) }} {{end}} {{if not .Values.apiServerURL }} diff --git a/charts/fleet-crd/templates/crds.yaml b/charts/fleet-crd/templates/crds.yaml index c96aec26e6..fa2637da3e 100644 --- a/charts/fleet-crd/templates/crds.yaml +++ b/charts/fleet-crd/templates/crds.yaml @@ -162,6 +162,23 @@ spec: description: DeploymentID is the ID of the currently applied deployment. nullable: true type: string + helmChartOptions: + description: 'HelmChartOptions is not nil and has the helm chart + config details when contents + + should be downloaded from a helm chart' + properties: + helmAppInsecureSkipTLSVerify: + description: InsecureSkipTLSverify will use insecure HTTPS to + clone the helm app resource. 
+ type: boolean + helmAppSecretName: + description: 'SecretName stores the secret name for storing + credentials when accessing + + a remote helm repository defined in a HelmApp resource' + type: string + type: object ociContents: description: OCIContents is true when this deployment's contents is stored in an oci registry @@ -1584,6 +1601,27 @@ spec: will wait for as long as timeoutSeconds' type: boolean type: object + helmAppOptions: + description: 'HelmAppOptions stores the options relative to HelmApp + resources + + Non-nil HelmAppOptions indicate that the source of resources is + a Helm chart, + + not a git repository.' + nullable: true + properties: + helmAppInsecureSkipTLSVerify: + description: InsecureSkipTLSverify will use insecure HTTPS to + clone the helm app resource. + type: boolean + helmAppSecretName: + description: 'SecretName stores the secret name for storing + credentials when accessing + + a remote helm repository defined in a HelmApp resource' + type: string + type: object ignore: description: IgnoreOptions can be used to ignore fields when monitoring the bundle. @@ -5692,7 +5730,7 @@ spec: that are ready. type: integer resourceCounts: - description: ResourceCounts is an aggregate over the GitRepoResourceCounts. + description: ResourceCounts is an aggregate over the ResourceCounts. properties: desiredReady: description: DesiredReady is the number of resources that should @@ -6464,7 +6502,7 @@ spec: description: 'Conditions is a list of Wrangler conditions that describe the state - of the GitRepo.' + of the resource.' items: properties: lastTransitionTime: @@ -6495,7 +6533,7 @@ spec: type: array desiredReadyClusters: description: "DesiredReadyClusters\tis the number of clusters that\ - \ should be ready for bundles of this GitRepo." + \ should be ready for bundles of this resource." type: integer display: description: Display contains a human readable summary of the status. @@ -6515,7 +6553,7 @@ spec: bundledeployments.' 
type: string state: - description: 'State is the state of the GitRepo, e.g. "GitUpdating" + description: 'State is the state of the resource, e.g. "GitUpdating" or the maximal BundleState according to StateRank.' @@ -6547,7 +6585,7 @@ spec: description: 'ReadyClusters is the lowest number of clusters that are ready over - all the bundles of this GitRepo.' + all the bundles of this resource.' type: integer resourceCounts: description: ResourceCounts contains the number of resources in @@ -6595,8 +6633,8 @@ spec: description: Resources contains metadata about the resources of each bundle. items: - description: GitRepoResource contains metadata about the resources - of a bundle. + description: Resource contains metadata about the resources of + a bundle. properties: apiVersion: description: APIVersion is the API version of the resource. @@ -6886,6 +6924,1800 @@ spec: --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: helmapps.fleet.cattle.io +spec: + group: fleet.cattle.io + names: + categories: + - fleet + kind: HelmApp + listKind: HelmAppList + plural: helmapps + singular: helmapp + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.helm.repo + name: Repo + type: string + - jsonPath: .spec.helm.chart + name: Chart + type: string + - jsonPath: .status.version + name: Version + type: string + - jsonPath: .status.display.readyBundleDeployments + name: BundleDeployments-Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: 'HelmApp describes a helm chart information. + + The resource contains the necessary information to deploy the chart to + target clusters.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
+ + Servers should convert recognized schemas to the latest internal value, + and + + may reject unrecognized values. + + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. + + Servers may infer this from the endpoint the client submits requests + to. + + Cannot be updated. + + In CamelCase. + + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + contentsId: + description: ContentsID stores the contents id when deploying contents + using an OCI registry. + nullable: true + type: string + correctDrift: + description: CorrectDrift specifies how drift correction should + work. + properties: + enabled: + description: Enabled correct drift if true. + type: boolean + force: + description: Force helm rollback with --force option will be + used if true. This will try to recreate all resources in the + release. + type: boolean + keepFailHistory: + description: KeepFailHistory keeps track of failed rollbacks + in the helm history. + type: boolean + type: object + defaultNamespace: + description: 'DefaultNamespace is the namespace to use for resources + that do not + + specify a namespace. This field is not used to enforce or lock + down + + the deployment to a specific namespace.' + nullable: true + type: string + deleteCRDResources: + description: DeleteCRDResources deletes CRDs. Warning! this will + also delete all your Custom Resources. + type: boolean + deleteNamespace: + description: DeleteNamespace can be used to delete the deployed + namespace when removing the bundle + type: boolean + dependsOn: + description: DependsOn refers to the bundles which must be ready + before this bundle can be deployed. + items: + properties: + name: + description: Name of the bundle. 
+ nullable: true + type: string + selector: + description: Selector matching bundle's labels. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a selector + that contains values, a key, and an operator that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the operator + is Exists or DoesNotExist, + + the values array must be empty. This array is + replaced during a strategic + + merge patch.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains only + "value". The requirements are ANDed.' + type: object + type: object + x-kubernetes-map-type: atomic + type: object + nullable: true + type: array + diff: + description: Diff can be used to ignore the modified state of objects + which are amended at runtime. + nullable: true + properties: + comparePatches: + description: ComparePatches match a resource and remove fields + from the check for modifications. + items: + description: ComparePatch matches a resource and removes fields + from the check for modifications. 
+ properties: + apiVersion: + description: APIVersion is the apiVersion of the resource + to match. + nullable: true + type: string + jsonPointers: + description: JSONPointers ignore diffs at a certain JSON + path. + items: + type: string + nullable: true + type: array + kind: + description: Kind is the kind of the resource to match. + nullable: true + type: string + name: + description: Name is the name of the resource to match. + nullable: true + type: string + namespace: + description: Namespace is the namespace of the resource + to match. + nullable: true + type: string + operations: + description: Operations remove a JSON path from the resource. + items: + description: Operation of a ComparePatch, usually "remove". + properties: + op: + description: Op is usually "remove" + nullable: true + type: string + path: + description: Path is the JSON path to remove. + nullable: true + type: string + value: + description: Value is usually empty. + nullable: true + type: string + type: object + nullable: true + type: array + type: object + nullable: true + type: array + type: object + forceSyncGeneration: + description: ForceSyncGeneration is used to force a redeployment + format: int64 + type: integer + helm: + description: Helm options for the deployment, like the chart name, + repo and values. + properties: + atomic: + description: Atomic sets the --atomic flag when Helm is performing + an upgrade + type: boolean + chart: + description: 'Chart can refer to any go-getter URL or OCI registry + based helm + + chart URL. The chart will be downloaded.' + nullable: true + type: string + disableDNS: + description: DisableDNS can be used to customize Helm's EnableDNS + option, which Fleet sets to `true` by default. 
+ type: boolean + disableDependencyUpdate: + description: DisableDependencyUpdate allows skipping chart dependencies + update + type: boolean + disablePreProcess: + description: DisablePreProcess disables template processing + in values + type: boolean + force: + description: Force allows to override immutable resources. This + could be dangerous. + type: boolean + maxHistory: + description: MaxHistory limits the maximum number of revisions + saved per release by Helm. + type: integer + releaseName: + description: 'ReleaseName sets a custom release name to deploy + the chart as. If + + not specified a release name will be generated by combining + the + + invoking GitRepo.name + GitRepo.path.' + maxLength: 53 + nullable: true + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + repo: + description: Repo is the name of the HTTPS helm repo to download + the chart from. + nullable: true + type: string + skipSchemaValidation: + description: SkipSchemaValidation allows skipping schema validation + against the chart values + type: boolean + takeOwnership: + description: TakeOwnership makes helm skip the check for its + own annotations + type: boolean + timeoutSeconds: + description: TimeoutSeconds is the time to wait for Helm operations. + type: integer + values: + description: 'Values passed to Helm. It is possible to specify + the keys and values + + as go template strings.' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + valuesFiles: + description: ValuesFiles is a list of files to load values from. + items: + type: string + nullable: true + type: array + valuesFrom: + description: ValuesFrom loads the values from configmaps and + secrets. + items: + description: 'Define helm values that can come from configmap, + secret or external. 
Credit: https://github.com/fluxcd/helm-operator/blob/0cfea875b5d44bea995abe7324819432070dfbdc/pkg/apis/helm.fluxcd.io/v1/types_helmrelease.go#L439' + properties: + configMapKeyRef: + description: The reference to a config map with release + values. + nullable: true + properties: + key: + nullable: true + type: string + name: + description: Name of a resource in the same namespace + as the referent. + nullable: true + type: string + namespace: + nullable: true + type: string + type: object + secretKeyRef: + description: The reference to a secret with release values. + nullable: true + properties: + key: + nullable: true + type: string + name: + description: Name of a resource in the same namespace + as the referent. + nullable: true + type: string + namespace: + nullable: true + type: string + type: object + type: object + nullable: true + type: array + version: + description: Version of the chart to download + nullable: true + type: string + waitForJobs: + description: 'WaitForJobs if set and timeoutSeconds provided, + will wait until all + + Jobs have been completed before marking the GitRepo as ready. + It + + will wait for as long as timeoutSeconds' + type: boolean + type: object + helmAppOptions: + description: 'HelmAppOptions stores the options relative to HelmApp + resources + + Non-nil HelmAppOptions indicate that the source of resources is + a Helm chart, + + not a git repository.' + nullable: true + properties: + helmAppInsecureSkipTLSVerify: + description: InsecureSkipTLSverify will use insecure HTTPS to + clone the helm app resource. + type: boolean + helmAppSecretName: + description: 'SecretName stores the secret name for storing + credentials when accessing + + a remote helm repository defined in a HelmApp resource' + type: string + type: object + helmSecretName: + description: 'HelmSecretName contains the auth secret with the credentials + to access + + a private Helm repository.' 
+ nullable: true + type: string + ignore: + description: IgnoreOptions can be used to ignore fields when monitoring + the bundle. + properties: + conditions: + description: Conditions is a list of conditions to be ignored + when monitoring the Bundle. + items: + additionalProperties: + type: string + type: object + nullable: true + type: array + type: object + insecureSkipTLSVerify: + description: InsecureSkipTLSverify will use insecure HTTPS to clone + the helm app resource. + type: boolean + keepResources: + description: KeepResources can be used to keep the deployed resources + when removing the bundle + type: boolean + kustomize: + description: 'Kustomize options for the deployment, like the dir + containing the + + kustomization.yaml file.' + nullable: true + properties: + dir: + description: 'Dir points to a custom folder for kustomize resources. + This folder must contain + + a kustomization.yaml file.' + nullable: true + type: string + type: object + labels: + additionalProperties: + type: string + description: 'Labels are copied to the bundle and can be used in + a + + dependsOn.selector.' + type: object + namespace: + description: 'TargetNamespace if present will assign all resource + to this + + namespace and if any cluster scoped resource exists the deployment + + will fail.' + nullable: true + type: string + namespaceAnnotations: + additionalProperties: + type: string + description: NamespaceAnnotations are annotations that will be appended + to the namespace created by Fleet. + nullable: true + type: object + namespaceLabels: + additionalProperties: + type: string + description: NamespaceLabels are labels that will be appended to + the namespace created by Fleet. + nullable: true + type: object + paused: + description: Paused if set to true, will stop any BundleDeployments + from being updated. It will be marked as out of sync. + type: boolean + resources: + description: 'Resources contains the resources that were read from + the bundle''s + + path. 
This includes the content of downloaded helm charts.' + items: + description: BundleResource represents the content of a single + resource from the bundle, like a YAML manifest. + properties: + content: + description: The content of the resource, can be compressed. + nullable: true + type: string + encoding: + description: Encoding is either empty or "base64+gz". + nullable: true + type: string + name: + description: Name of the resource, can include the bundle's + internal path. + nullable: true + type: string + type: object + nullable: true + type: array + rolloutStrategy: + description: 'RolloutStrategy controls the rollout of bundles, by + defining + + partitions, canaries and percentages for cluster availability.' + nullable: true + properties: + autoPartitionSize: + anyOf: + - type: integer + - type: string + description: 'A number or percentage of how to automatically + partition clusters if no + + specific partitioning strategy is configured. + + default: 25%' + nullable: true + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: 'A number or percentage of clusters that can be + unavailable during an update + + of a bundle. This follows the same basic approach as a deployment + rollout + + strategy. Once the number of clusters meets unavailable state + update will be + + paused. Default value is 100% which doesn''t take effect on + update. + + default: 100%' + nullable: true + x-kubernetes-int-or-string: true + maxUnavailablePartitions: + anyOf: + - type: integer + - type: string + description: 'A number or percentage of cluster partitions that + can be unavailable during + + an update of a bundle. + + default: 0' + nullable: true + x-kubernetes-int-or-string: true + partitions: + description: 'A list of definitions of partitions. If any target + clusters do not match + + the configuration they are added to partitions at the end + following the + + autoPartitionSize.' 
+ items: + description: Partition defines a separate rollout strategy + for a set of clusters. + properties: + clusterGroup: + description: A cluster group name to include in this partition + type: string + clusterGroupSelector: + description: Selector matching cluster group labels to + include in this partition + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a + selector that contains values, a key, and an operator + that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and + DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the + operator is Exists or DoesNotExist, + + the values array must be empty. This array + is replaced during a strategic + + merge patch.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains + only "value". The requirements are ANDed.' 
+ type: object + type: object + x-kubernetes-map-type: atomic + clusterName: + description: ClusterName is the name of a cluster to include + in this partition + type: string + clusterSelector: + description: Selector matching cluster labels to include + in this partition + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a + selector that contains values, a key, and an operator + that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and + DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the + operator is Exists or DoesNotExist, + + the values array must be empty. This array + is replaced during a strategic + + merge patch.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains + only "value". The requirements are ANDed.' + type: object + type: object + x-kubernetes-map-type: atomic + maxUnavailable: + anyOf: + - type: integer + - type: string + description: 'A number or percentage of clusters that + can be unavailable in this + + partition before this partition is treated as done. 
+ + default: 10%' + x-kubernetes-int-or-string: true + name: + description: A user-friendly name given to the partition + used for Display (optional). + nullable: true + type: string + type: object + nullable: true + type: array + type: object + serviceAccount: + description: ServiceAccount which will be used to perform this deployment. + nullable: true + type: string + targetRestrictions: + description: TargetRestrictions is an allow list, which controls + if a bundledeployment is created for a target. + items: + description: 'BundleTargetRestriction is used internally by Fleet + and should not be modified. + + It acts as an allow list, to prevent the creation of BundleDeployments + from + + Targets created by TargetCustomizations in fleet.yaml.' + properties: + clusterGroup: + nullable: true + type: string + clusterGroupSelector: + description: 'A label selector is a label query over a set + of resources. The result of matchLabels and + + matchExpressions are ANDed. An empty label selector matches + all objects. A null + + label selector matches no objects.' + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a selector + that contains values, a key, and an operator that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the operator + is Exists or DoesNotExist, + + the values array must be empty. This array is + replaced during a strategic + + merge patch.' 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains only + "value". The requirements are ANDed.' + type: object + type: object + x-kubernetes-map-type: atomic + clusterName: + nullable: true + type: string + clusterSelector: + description: 'A label selector is a label query over a set + of resources. The result of matchLabels and + + matchExpressions are ANDed. An empty label selector matches + all objects. A null + + label selector matches no objects.' + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a selector + that contains values, a key, and an operator that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the operator + is Exists or DoesNotExist, + + the values array must be empty. This array is + replaced during a strategic + + merge patch.' 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains only + "value". The requirements are ANDed.' + type: object + type: object + x-kubernetes-map-type: atomic + name: + nullable: true + type: string + type: object + type: array + targets: + description: 'Targets refer to the clusters which will be deployed + to. + + Targets are evaluated in order and the first one to match is used.' + items: + description: 'BundleTarget declares clusters to deploy to. Fleet + will merge the + + BundleDeploymentOptions from customizations into this struct.' + properties: + clusterGroup: + description: ClusterGroup to match a specific cluster group + by name. + nullable: true + type: string + clusterGroupSelector: + description: ClusterGroupSelector is a selector to match cluster + groups. + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a selector + that contains values, a key, and an operator that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the operator + is Exists or DoesNotExist, + + the values array must be empty. 
This array is + replaced during a strategic + + merge patch.' + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains only + "value". The requirements are ANDed.' + type: object + type: object + x-kubernetes-map-type: atomic + clusterName: + description: 'ClusterName to match a specific cluster by name + that will be + + selected' + nullable: true + type: string + clusterSelector: + description: 'ClusterSelector is a selector to match clusters. + The structure is + + the standard metav1.LabelSelector format. If clusterGroupSelector + or + + clusterGroup is specified, clusterSelector will be used + only to + + further refine the selection after clusterGroupSelector + and + + clusterGroup is evaluated.' + nullable: true + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: 'A label selector requirement is a selector + that contains values, a key, and an operator that + + relates the key and values.' + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: 'operator represents a key''s relationship + to a set of values. + + Valid operators are In, NotIn, Exists and DoesNotExist.' + type: string + values: + description: 'values is an array of string values. + If the operator is In or NotIn, + + the values array must be non-empty. If the operator + is Exists or DoesNotExist, + + the values array must be empty. This array is + replaced during a strategic + + merge patch.' 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: 'matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels + + map is equivalent to an element of matchExpressions, + whose key field is "key", the + + operator is "In", and the values array contains only + "value". The requirements are ANDed.' + type: object + type: object + x-kubernetes-map-type: atomic + correctDrift: + description: CorrectDrift specifies how drift correction should + work. + properties: + enabled: + description: Enabled correct drift if true. + type: boolean + force: + description: Force helm rollback with --force option will + be used if true. This will try to recreate all resources + in the release. + type: boolean + keepFailHistory: + description: KeepFailHistory keeps track of failed rollbacks + in the helm history. + type: boolean + type: object + defaultNamespace: + description: 'DefaultNamespace is the namespace to use for + resources that do not + + specify a namespace. This field is not used to enforce or + lock down + + the deployment to a specific namespace.' + nullable: true + type: string + deleteCRDResources: + description: DeleteCRDResources deletes CRDs. Warning! this + will also delete all your Custom Resources. + type: boolean + deleteNamespace: + description: DeleteNamespace can be used to delete the deployed + namespace when removing the bundle + type: boolean + diff: + description: Diff can be used to ignore the modified state + of objects which are amended at runtime. + nullable: true + properties: + comparePatches: + description: ComparePatches match a resource and remove + fields from the check for modifications. + items: + description: ComparePatch matches a resource and removes + fields from the check for modifications. 
+ properties: + apiVersion: + description: APIVersion is the apiVersion of the + resource to match. + nullable: true + type: string + jsonPointers: + description: JSONPointers ignore diffs at a certain + JSON path. + items: + type: string + nullable: true + type: array + kind: + description: Kind is the kind of the resource to + match. + nullable: true + type: string + name: + description: Name is the name of the resource to + match. + nullable: true + type: string + namespace: + description: Namespace is the namespace of the resource + to match. + nullable: true + type: string + operations: + description: Operations remove a JSON path from + the resource. + items: + description: Operation of a ComparePatch, usually + "remove". + properties: + op: + description: Op is usually "remove" + nullable: true + type: string + path: + description: Path is the JSON path to remove. + nullable: true + type: string + value: + description: Value is usually empty. + nullable: true + type: string + type: object + nullable: true + type: array + type: object + nullable: true + type: array + type: object + doNotDeploy: + description: DoNotDeploy if set to true, will not deploy to + this target. + type: boolean + forceSyncGeneration: + description: ForceSyncGeneration is used to force a redeployment + format: int64 + type: integer + helm: + description: Helm options for the deployment, like the chart + name, repo and values. + properties: + atomic: + description: Atomic sets the --atomic flag when Helm is + performing an upgrade + type: boolean + chart: + description: 'Chart can refer to any go-getter URL or + OCI registry based helm + + chart URL. The chart will be downloaded.' + nullable: true + type: string + disableDNS: + description: DisableDNS can be used to customize Helm's + EnableDNS option, which Fleet sets to `true` by default. 
+ type: boolean + disableDependencyUpdate: + description: DisableDependencyUpdate allows skipping chart + dependencies update + type: boolean + disablePreProcess: + description: DisablePreProcess disables template processing + in values + type: boolean + force: + description: Force allows to override immutable resources. + This could be dangerous. + type: boolean + maxHistory: + description: MaxHistory limits the maximum number of revisions + saved per release by Helm. + type: integer + releaseName: + description: 'ReleaseName sets a custom release name to + deploy the chart as. If + + not specified a release name will be generated by combining + the + + invoking GitRepo.name + GitRepo.path.' + maxLength: 53 + nullable: true + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + repo: + description: Repo is the name of the HTTPS helm repo to + download the chart from. + nullable: true + type: string + skipSchemaValidation: + description: SkipSchemaValidation allows skipping schema + validation against the chart values + type: boolean + takeOwnership: + description: TakeOwnership makes helm skip the check for + its own annotations + type: boolean + timeoutSeconds: + description: TimeoutSeconds is the time to wait for Helm + operations. + type: integer + values: + description: 'Values passed to Helm. It is possible to + specify the keys and values + + as go template strings.' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + valuesFiles: + description: ValuesFiles is a list of files to load values + from. + items: + type: string + nullable: true + type: array + valuesFrom: + description: ValuesFrom loads the values from configmaps + and secrets. + items: + description: 'Define helm values that can come from + configmap, secret or external. 
Credit: https://github.com/fluxcd/helm-operator/blob/0cfea875b5d44bea995abe7324819432070dfbdc/pkg/apis/helm.fluxcd.io/v1/types_helmrelease.go#L439' + properties: + configMapKeyRef: + description: The reference to a config map with + release values. + nullable: true + properties: + key: + nullable: true + type: string + name: + description: Name of a resource in the same + namespace as the referent. + nullable: true + type: string + namespace: + nullable: true + type: string + type: object + secretKeyRef: + description: The reference to a secret with release + values. + nullable: true + properties: + key: + nullable: true + type: string + name: + description: Name of a resource in the same + namespace as the referent. + nullable: true + type: string + namespace: + nullable: true + type: string + type: object + type: object + nullable: true + type: array + version: + description: Version of the chart to download + nullable: true + type: string + waitForJobs: + description: 'WaitForJobs if set and timeoutSeconds provided, + will wait until all + + Jobs have been completed before marking the GitRepo + as ready. It + + will wait for as long as timeoutSeconds' + type: boolean + type: object + ignore: + description: IgnoreOptions can be used to ignore fields when + monitoring the bundle. + properties: + conditions: + description: Conditions is a list of conditions to be + ignored when monitoring the Bundle. + items: + additionalProperties: + type: string + type: object + nullable: true + type: array + type: object + keepResources: + description: KeepResources can be used to keep the deployed + resources when removing the bundle + type: boolean + kustomize: + description: 'Kustomize options for the deployment, like the + dir containing the + + kustomization.yaml file.' + nullable: true + properties: + dir: + description: 'Dir points to a custom folder for kustomize + resources. This folder must contain + + a kustomization.yaml file.' 
+ nullable: true + type: string + type: object + name: + description: 'Name of target. This value is largely for display + and logging. If + + not specified a default name of the format "target000" will + be used' + type: string + namespace: + description: 'TargetNamespace if present will assign all resource + to this + + namespace and if any cluster scoped resource exists the + deployment + + will fail.' + nullable: true + type: string + namespaceAnnotations: + additionalProperties: + type: string + description: NamespaceAnnotations are annotations that will + be appended to the namespace created by Fleet. + nullable: true + type: object + namespaceLabels: + additionalProperties: + type: string + description: NamespaceLabels are labels that will be appended + to the namespace created by Fleet. + nullable: true + type: object + serviceAccount: + description: ServiceAccount which will be used to perform + this deployment. + nullable: true + type: string + yaml: + description: 'YAML options, if using raw YAML these are names + that map to + + overlays/{name} files that will be used to replace or patch + a resource.' + nullable: true + properties: + overlays: + description: 'Overlays is a list of names that maps to + folders in "overlays/". + + If you wish to customize the file ./subdir/resource.yaml + then a file + + ./overlays/myoverlay/subdir/resource.yaml will replace + the base + + file. + + A file named ./overlays/myoverlay/subdir/resource_patch.yaml + will patch the base file.' + items: + type: string + nullable: true + type: array + type: object + type: object + type: array + yaml: + description: 'YAML options, if using raw YAML these are names that + map to + + overlays/{name} files that will be used to replace or patch a + resource.' + nullable: true + properties: + overlays: + description: 'Overlays is a list of names that maps to folders + in "overlays/". 
+ + If you wish to customize the file ./subdir/resource.yaml then + a file + + ./overlays/myoverlay/subdir/resource.yaml will replace the + base + + file. + + A file named ./overlays/myoverlay/subdir/resource_patch.yaml + will patch the base file.' + items: + type: string + nullable: true + type: array + type: object + type: object + status: + properties: + conditions: + description: 'Conditions is a list of Wrangler conditions that describe + the state + + of the resource.' + items: + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + lastUpdateTime: + description: The last time this condition was updated. + type: string + message: + description: Human-readable message indicating details about + last transition + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of cluster condition. + type: string + required: + - status + - type + type: object + type: array + desiredReadyClusters: + description: "DesiredReadyClusters\tis the number of clusters that\ + \ should be ready for bundles of this resource." + type: integer + display: + description: Display contains a human readable summary of the status. + properties: + error: + description: Error is true if a message is present. + type: boolean + message: + description: Message contains the relevant message from the + deployment conditions. + type: string + readyBundleDeployments: + description: 'ReadyBundleDeployments is a string in the form + "%d/%d", that describes the + + number of ready bundledeployments over the total number of + bundledeployments.' + type: string + state: + description: 'State is the state of the resource, e.g. "GitUpdating" + or the maximal + + BundleState according to StateRank.' 
+ type: string + type: object + readyClusters: + description: 'ReadyClusters is the lowest number of clusters that + are ready over + + all the bundles of this resource.' + type: integer + resourceCounts: + description: ResourceCounts contains the number of resources in + each state over all bundles. + properties: + desiredReady: + description: DesiredReady is the number of resources that should + be ready. + type: integer + missing: + description: Missing is the number of missing resources. + type: integer + modified: + description: Modified is the number of resources that have been + modified. + type: integer + notReady: + description: 'NotReady is the number of not ready resources. + Resources are not + + ready if they do not match any other state.' + type: integer + orphaned: + description: Orphaned is the number of orphaned resources. + type: integer + ready: + description: Ready is the number of ready resources. + type: integer + unknown: + description: Unknown is the number of resources in an unknown + state. + type: integer + waitApplied: + description: WaitApplied is the number of resources that are + waiting to be applied. + type: integer + type: object + resourceErrors: + description: ResourceErrors is a sorted list of errors from the + resources. + items: + type: string + type: array + resources: + description: Resources contains metadata about the resources of + each bundle. + items: + description: Resource contains metadata about the resources of + a bundle. + properties: + apiVersion: + description: APIVersion is the API version of the resource. + nullable: true + type: string + error: + description: Error is true if any Error in the PerClusterState + is true. + type: boolean + id: + description: ID is the name of the resource, e.g. "namespace1/my-config" + or "backingimagemanagers.storage.io". 
+ nullable: true + type: string + incompleteState: + description: 'IncompleteState is true if a bundle summary + has 10 or more non-ready + + resources or a non-ready resource has more 10 or more non-ready + or + + modified states.' + type: boolean + kind: + description: Kind is the k8s kind of the resource. + nullable: true + type: string + message: + description: Message is the first message from the PerClusterStates. + nullable: true + type: string + name: + description: Name of the resource. + nullable: true + type: string + namespace: + description: Namespace of the resource. + nullable: true + type: string + perClusterState: + description: PerClusterState is a list of states for each + cluster. Derived from the summaries non-ready resources. + items: + description: ResourcePerClusterState is generated for each + non-ready resource of the bundles. + properties: + clusterId: + description: ClusterID is the id of the cluster. + nullable: true + type: string + error: + description: Error is true if the resource is in an + error state, copied from the bundle's summary for + non-ready resources. + type: boolean + message: + description: Message combines the messages from the + bundle's summary. Messages are joined with the delimiter + ';'. + nullable: true + type: string + patch: + description: Patch for modified resources. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + state: + description: State is the state of the resource. + nullable: true + type: string + transitioning: + description: 'Transitioning is true if the resource + is in a transitioning state, + + copied from the bundle''s summary for non-ready resources.' + type: boolean + type: object + nullable: true + type: array + state: + description: State is the state of the resource, e.g. "Unknown", + "WaitApplied", "ErrApplied" or "Ready". + type: string + transitioning: + description: Transitioning is true if any Transitioning in + the PerClusterState is true. 
+ type: boolean + type: + description: Type is the type of the resource, e.g. "apiextensions.k8s.io.customresourcedefinition" + or "configmap". + type: string + type: object + type: array + summary: + description: Summary contains the number of bundle deployments in + each state and a list of non-ready resources. + properties: + desiredReady: + description: 'DesiredReady is the number of bundle deployments + that should be + + ready.' + type: integer + errApplied: + description: 'ErrApplied is the number of bundle deployments + that have been synced + + from the Fleet controller and the downstream cluster, but + with some + + errors when deploying the bundle.' + type: integer + modified: + description: 'Modified is the number of bundle deployments that + have been deployed + + and for which all resources are ready, but where some changes + from the + + Git repository have not yet been synced.' + type: integer + nonReadyResources: + description: 'NonReadyClusters is a list of states, which is + filled for a bundle + + that is not ready.' + items: + description: 'NonReadyResource contains information about + a bundle that is not ready for a + + given state like "ErrApplied". It contains a list of non-ready + or modified + + resources and their states.' + properties: + bundleState: + description: State is the state of the resource, like + e.g. "NotReady" or "ErrApplied". + nullable: true + type: string + message: + description: Message contains information why the bundle + is not ready. + nullable: true + type: string + modifiedStatus: + description: ModifiedStatus lists the state for each modified + resource. + items: + description: 'ModifiedStatus is used to report the status + of a resource that is modified. + + It indicates if the modification was a create, a delete + or a patch.' + properties: + apiVersion: + nullable: true + type: string + delete: + type: boolean + exist: + description: Exist is true if the resource exists + but is not owned by us. 
This can happen if a resource + was adopted by another bundle whereas the first + bundle still exists and due to that reports that + it does not own it. + type: boolean + kind: + nullable: true + type: string + missing: + type: boolean + name: + nullable: true + type: string + namespace: + nullable: true + type: string + patch: + nullable: true + type: string + type: object + nullable: true + type: array + name: + description: Name is the name of the resource. + nullable: true + type: string + nonReadyStatus: + description: NonReadyStatus lists the state for each non-ready + resource. + items: + description: NonReadyStatus is used to report the status + of a resource that is not ready. It includes a summary. + properties: + apiVersion: + nullable: true + type: string + kind: + nullable: true + type: string + name: + nullable: true + type: string + namespace: + nullable: true + type: string + summary: + properties: + error: + type: boolean + message: + items: + type: string + type: array + state: + type: string + transitioning: + type: boolean + type: object + uid: + description: 'UID is a type that holds unique ID + values, including UUIDs. Because we + + don''t ONLY use UUIDs, this is an alias to string. Being + a type captures + + intent and helps make sure that UIDs and names + do not get conflated.' + nullable: true + type: string + type: object + nullable: true + type: array + type: object + nullable: true + type: array + notReady: + description: 'NotReady is the number of bundle deployments that + have been deployed + + where some resources are not ready.' + type: integer + outOfSync: + description: 'OutOfSync is the number of bundle deployments + that have been synced + + from Fleet controller, but not yet by the downstream agent.' + type: integer + pending: + description: 'Pending is the number of bundle deployments that + are being processed + + by Fleet controller.' 
+ type: integer + ready: + description: 'Ready is the number of bundle deployments that + have been deployed + + where all resources are ready.' + type: integer + waitApplied: + description: 'WaitApplied is the number of bundle deployments + that have been + + synced from Fleet controller and downstream cluster, but are + waiting + + to be deployed.' + type: integer + type: object + version: + description: 'Version installed for the helm chart. + + When using * or empty version in the spec we get the latest version + from + + the helm repository when possible' + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.5 diff --git a/charts/fleet/ci/debug-values.yaml b/charts/fleet/ci/debug-values.yaml index ab519706db..fed47ac456 100644 --- a/charts/fleet/ci/debug-values.yaml +++ b/charts/fleet/ci/debug-values.yaml @@ -52,6 +52,8 @@ controller: extraEnv: - name: EXPERIMENTAL_OCI_STORAGE value: "true" + - name: EXPERIMENTAL_HELM_OPS + value: "true" shards: - id: shard0 diff --git a/charts/fleet/ci/nobootstrap-values.yaml b/charts/fleet/ci/nobootstrap-values.yaml index e4fbff5cd3..62ac3b942b 100644 --- a/charts/fleet/ci/nobootstrap-values.yaml +++ b/charts/fleet/ci/nobootstrap-values.yaml @@ -51,6 +51,8 @@ controller: extraEnv: - name: EXPERIMENTAL_OCI_STORAGE value: "true" + - name: EXPERIMENTAL_HELM_OPS + value: "true" shards: - id: shard0 diff --git a/charts/fleet/ci/nodebug-values.yaml b/charts/fleet/ci/nodebug-values.yaml index b4e898036a..ca23bfbd65 100644 --- a/charts/fleet/ci/nodebug-values.yaml +++ b/charts/fleet/ci/nodebug-values.yaml @@ -51,6 +51,8 @@ controller: extraEnv: - name: EXPERIMENTAL_OCI_STORAGE value: "true" + - name: EXPERIMENTAL_HELM_OPS + value: "true" shards: - id: shard0 diff --git a/charts/fleet/ci/nogitops-values.yaml 
b/charts/fleet/ci/nogitops-values.yaml index d76af71f65..2eddf54f64 100644 --- a/charts/fleet/ci/nogitops-values.yaml +++ b/charts/fleet/ci/nogitops-values.yaml @@ -51,6 +51,8 @@ controller: extraEnv: - name: EXPERIMENTAL_OCI_STORAGE value: "true" + - name: EXPERIMENTAL_HELM_OPS + value: "true" shards: - id: shard0 diff --git a/charts/fleet/templates/deployment_helmops.yaml b/charts/fleet/templates/deployment_helmops.yaml new file mode 100644 index 0000000000..47a71089a5 --- /dev/null +++ b/charts/fleet/templates/deployment_helmops.yaml @@ -0,0 +1,131 @@ +{{- if has (dict "name" "EXPERIMENTAL_HELM_OPS" "value" "true") .Values.extraEnv }} +{{- $shards := list (dict "id" "" "nodeSelector" dict) -}} +{{- $uniqueShards := list -}} +{{- if .Values.shards -}} + {{- range .Values.shards -}} + {{- if not (has .id $uniqueShards) -}} + {{- $shards = append $shards . -}} + {{- $uniqueShards = append $uniqueShards .id -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{ range $shard := $shards }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "helmops{{if $shard.id }}-shard-{{ $shard.id }}{{end}}" +spec: + selector: + matchLabels: + app: "helmops" + template: + metadata: + labels: + app: "helmops" + fleet.cattle.io/shard-id: "{{ $shard.id }}" + {{- if empty $shard.id }} + fleet.cattle.io/shard-default: "true" + {{- end }} + spec: + serviceAccountName: helmops + containers: + - image: "{{ template "system_default_registry" $ }}{{ $.Values.image.repository }}:{{ $.Values.image.tag }}" + name: helmops + {{- if $.Values.metrics.enabled }} + ports: + - containerPort: 8081 + name: metrics + {{- end }} + args: + - fleetcontroller + - helmops + {{- if $.Values.debug }} + - --debug + - --debug-level + - {{ quote $.Values.debugLevel }} + {{- end }} + {{- if $shard.id }} + - --shard-id + - {{ quote $shard.id }} + {{- end }} + {{- if not $.Values.metrics.enabled }} + - --disable-metrics + {{- end }} + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: 
metadata.namespace + {{- if $.Values.leaderElection.leaseDuration }} + - name: CATTLE_ELECTION_LEASE_DURATION + value: {{$.Values.leaderElection.leaseDuration}} + {{- end }} + {{- if $.Values.leaderElection.retryPeriod }} + - name: CATTLE_ELECTION_RETRY_PERIOD + value: {{$.Values.leaderElection.retryPeriod}} + {{- end }} + {{- if $.Values.leaderElection.renewDeadline }} + - name: CATTLE_ELECTION_RENEW_DEADLINE + value: {{$.Values.leaderElection.renewDeadline}} + {{- end }} + {{- if $.Values.proxy }} + - name: HTTP_PROXY + value: {{ $.Values.proxy }} + - name: HTTPS_PROXY + value: {{ $.Values.proxy }} + - name: NO_PROXY + value: {{ $.Values.noProxy }} + {{- end }} + {{- if $.Values.controller.reconciler.workers.gitrepo }} + - name: HELMOPS_RECONCILER_WORKERS + value: {{ quote $.Values.controller.reconciler.workers.gitrepo }} + {{- end }} +{{- if $.Values.extraEnv }} +{{ toYaml $.Values.extraEnv | indent 12}} +{{- end }} + {{- if $.Values.debug }} + - name: CATTLE_DEV_MODE + value: "true" + {{- end }} + {{- if not $.Values.disableSecurityContext }} + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + privileged: false + capabilities: + drop: + - ALL + {{- end }} + volumeMounts: + - mountPath: /tmp + name: tmp + nodeSelector: {{ include "linux-node-selector" $shard.id | nindent 8 }} +{{- if $.Values.nodeSelector }} +{{ toYaml $.Values.nodeSelector | indent 8 }} +{{- end }} +{{- if $shard.nodeSelector -}} +{{- range $key, $value := $shard.nodeSelector }} +{{ $key | indent 8}}: {{ $value }} +{{- end }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" $shard.id | nindent 8 }} +{{- if $.Values.tolerations }} +{{ toYaml $.Values.tolerations | indent 8 }} +{{- end }} + {{- if $.Values.priorityClassName }} + priorityClassName: "{{$.Values.priorityClassName}}" + {{- end }} + +{{- if not $.Values.disableSecurityContext }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + runAsGroup: 1000 +{{- end }} + volumes: + - name: 
tmp + emptyDir: {} +--- +{{- end }} +{{- end }} diff --git a/charts/fleet/templates/rbac_helmops.yaml b/charts/fleet/templates/rbac_helmops.yaml new file mode 100644 index 0000000000..bcafce8492 --- /dev/null +++ b/charts/fleet/templates/rbac_helmops.yaml @@ -0,0 +1,97 @@ +{{- if has (dict "name" "EXPERIMENTAL_HELM_OPS" "value" "true") .Values.extraEnv }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: helmops +rules: + - apiGroups: + - "" + resources: + - 'secrets' + verbs: + - "create" + - "list" + - apiGroups: + - "" + resources: + - 'configmaps' + verbs: + - '*' + - apiGroups: + - "fleet.cattle.io" + resources: + - "helmapps" + - "helmapps/status" + verbs: + - "*" + - apiGroups: + - "fleet.cattle.io" + resources: + - "bundles" + - "bundledeployments" + verbs: + - list + - delete + - get + - watch + - update + - create + - apiGroups: + - "" + resources: + - 'events' + verbs: + - "create" + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - "create" + - apiGroups: + - "" + resources: + - namespaces + verbs: + - "create" + - "delete" + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: helmops-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: helmops +subjects: + - kind: ServiceAccount + name: helmops + namespace: {{ .Release.Namespace }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: helmops +rules: + - apiGroups: + - "coordination.k8s.io" + resources: + - "leases" + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: helmops +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: helmops +subjects: + - kind: ServiceAccount + name: helmops +{{- end }} diff --git a/charts/fleet/templates/serviceaccount_helmops.yaml b/charts/fleet/templates/serviceaccount_helmops.yaml new file mode 100644 index 0000000000..84f393896d --- /dev/null +++ 
b/charts/fleet/templates/serviceaccount_helmops.yaml @@ -0,0 +1,6 @@ +{{- if has (dict "name" "EXPERIMENTAL_HELM_OPS" "value" "true") .Values.extraEnv }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: helmops +{{- end }} diff --git a/charts/fleet/values.yaml b/charts/fleet/values.yaml index 61d3157d5f..c691dc83ee 100644 --- a/charts/fleet/values.yaml +++ b/charts/fleet/values.yaml @@ -112,6 +112,8 @@ controller: # extraEnv: # - name: EXPERIMENTAL_OCI_STORAGE # value: "true" +# - name: EXPERIMENTAL_HELM_OPS +# value: "true" # shards: # - id: shard0 diff --git a/dev/import-images-k3d b/dev/import-images-k3d index eed770aca5..1f6af298d1 100755 --- a/dev/import-images-k3d +++ b/dev/import-images-k3d @@ -2,13 +2,26 @@ set -euxo pipefail +# The upstream cluster to import all the images to. upstream_ctx="${FLEET_E2E_CLUSTER-k3d-upstream}" + +# The single downstream cluster to import the agent image to. downstream_ctx="${FLEET_E2E_CLUSTER_DOWNSTREAM-k3d-downstream}" +# If multi-cluster is enabled, import the agent image to all downstream clusters. +FLEET_E2E_DS_CLUSTER_COUNT="${FLEET_E2E_DS_CLUSTER_COUNT:-1}" + k3d image import rancher/fleet:dev rancher/fleet-agent:dev -m direct -c "${upstream_ctx#k3d-}" if [ "$upstream_ctx" != "$downstream_ctx" ]; then - k3d image import rancher/fleet-agent:dev -m direct -c "${downstream_ctx#k3d-}" + if [ "$FLEET_E2E_DS_CLUSTER_COUNT" -gt 1 ]; then + for cluster in $(k3d cluster list -o json | \ + jq -r ".[].name | select(. | contains(\"${downstream_ctx#k3d-}\"))"); do + k3d image import rancher/fleet-agent:dev -m direct -c "${cluster}" + done + else + k3d image import rancher/fleet-agent:dev -m direct -c "${downstream_ctx#k3d-}" + fi else echo "not importing agent to any downstream clusters. 
Set FLEET_E2E_CLUSTER_DOWNSTREAM" fi diff --git a/dev/setup-fleet b/dev/setup-fleet index 60895e834f..81ce0ecbf0 100755 --- a/dev/setup-fleet +++ b/dev/setup-fleet @@ -42,6 +42,8 @@ helm -n cattle-fleet-system upgrade --install --create-namespace --wait --reset- $shards_settings \ --set-string extraEnv[0].name=EXPERIMENTAL_OCI_STORAGE \ --set-string extraEnv[0].value=true \ + --set-string extraEnv[1].name=EXPERIMENTAL_HELM_OPS \ + --set-string extraEnv[1].value=true \ --set garbageCollectionInterval=1s \ --set debug=true --set debugLevel=1 fleet charts/fleet diff --git a/dev/setup-fleet-managed-downstream b/dev/setup-fleet-managed-downstream index 0d8c29a3a8..f24756515c 100755 --- a/dev/setup-fleet-managed-downstream +++ b/dev/setup-fleet-managed-downstream @@ -8,14 +8,15 @@ if [ ! -d ./charts/fleet ]; then exit 1 fi -# fetching from local kubeconfig -host=$( docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' k3d-downstream-server-0 ) -ca=$( kubectl config view --flatten -o jsonpath='{.clusters[?(@.name == "k3d-downstream")].cluster.certificate-authority-data}' ) -client_cert=$( kubectl config view --flatten -o jsonpath='{.users[?(@.name == "admin@k3d-downstream")].user.client-certificate-data}' ) -token=$( kubectl config view --flatten -o jsonpath='{.users[?(@.name == "admin@k3d-downstream")].user.client-key-data}' ) -server="https://$host:6443" +for cluster in $(k3d cluster list -o json | jq -r '.[].name | select(. 
| contains("downstream"))'); do + # fetching from local kubeconfig + host=$( docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' k3d-$cluster-server-0 ) + ca=$( kubectl config view --flatten -o jsonpath="{.clusters[?(@.name == \"k3d-$cluster\")].cluster.certificate-authority-data}" ) + client_cert=$( kubectl config view --flatten -o jsonpath="{.users[?(@.name == \"admin@k3d-$cluster\")].user.client-certificate-data}" ) + token=$( kubectl config view --flatten -o jsonpath="{.users[?(@.name == \"admin@k3d-$cluster\")].user.client-key-data}" ) + server="https://$host:6443" -value=$(cat < "$TMP_CONFIG" + cat <"$TMP_CONFIG" mirrors: "docker.io": endpoint: @@ -22,8 +23,22 @@ fi # https://hub.docker.com/r/rancher/k3s/tags #args="$args -i docker.io/rancher/k3s:v1.22.15-k3s1" -k3d cluster create upstream --servers 3 --api-port 36443 -p '80:80@server:0' -p '443:443@server:0' --k3s-arg '--tls-san=k3d-upstream-server-0@server:0' $args -k3d cluster create downstream --servers 1 --api-port 36444 -p '5080:80@server:0' -p '3444:443@server:0' $args -#k3d cluster create downstream2 --servers 1 --api-port 36445 -p '6080:80@server:0' -p '3445:443@server:0' $args -#k3d cluster create downstream3 --servers 1 --api-port 36446 -p '7080:80@server:0' -p '3446:443@server:0' $args +k3d cluster create upstream \ + --servers 3 \ + --api-port 36443 \ + -p '80:80@server:0' \ + -p '443:443@server:0' \ + --k3s-arg '--tls-san=k3d-upstream-server-0@server:0' \ + $args + +for i in $(seq 1 "$FLEET_E2E_DS_CLUSTER_COUNT"); do + k3d cluster create "downstream$i" \ + --servers 1 \ + --api-port $((36443 + i)) \ + -p "$((4080 + (1000 * i))):80@server:0" \ + -p "$((3443 + i)):443@server:0" \ + --k3s-arg "--tls-san=k3d-downstream$i-server-0@server:0" \ + $args +done + kubectl config use-context k3d-upstream diff --git a/dev/setup-multi-cluster b/dev/setup-multi-cluster index 209089b04d..5f5728da2a 100755 --- a/dev/setup-multi-cluster +++ b/dev/setup-multi-cluster @@ -6,6 +6,8 @@ export 
CUSTOM_CONFIG_FILE="env.multi-cluster" # shellcheck source=dev/setup-cluster-config source dev/setup-cluster-config +FLEET_E2E_DS_CLUSTER_COUNT=${FLEET_E2E_DS_CLUSTER_COUNT:-1} + # Cleans with settings sourced, so it should be rather selective. ./dev/k3d-act-clean diff --git a/e2e/assets/helm/zot_configmap.yaml b/e2e/assets/helm/zot_configmap.yaml index 15474c2f7d..bfc7145b7b 100644 --- a/e2e/assets/helm/zot_configmap.yaml +++ b/e2e/assets/helm/zot_configmap.yaml @@ -15,12 +15,14 @@ data: "key": "/etc/zot/certs/tls.key" }, "accessControl": { - "**": { - "policies": [{ - "users": ["admin"], - "actions": ["read", "create", "update", "delete"] - }], - "defaultPolicy": ["read", "create"] + "repositories": { + "**": { + "policies": [{ + "users": ["admin"], + "actions": ["read", "create", "update", "delete"] + }], + "defaultPolicy": ["read", "create"] + } } }, "address": "0.0.0.0", @@ -29,5 +31,13 @@ data: "log": { "level": "debug" }, "storage": { "rootDirectory": "/tmp/zot" + }, + "extensions": { + "ui": { + "enable": true + }, + "search": { + "enable": true + } } } diff --git a/e2e/assets/helm/zot_deployment.yaml b/e2e/assets/helm/zot_deployment.yaml index 7fc7407565..745a7fb58e 100644 --- a/e2e/assets/helm/zot_deployment.yaml +++ b/e2e/assets/helm/zot_deployment.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: zot - image: ghcr.io/project-zot/zot-linux-amd64:v1.4.3 + image: ghcr.io/project-zot/zot-linux-amd64:v2.1.1 imagePullPolicy: IfNotPresent ports: - containerPort: 8082 diff --git a/e2e/assets/helmapp-template.yaml b/e2e/assets/helmapp-template.yaml new file mode 100644 index 0000000000..399382da59 --- /dev/null +++ b/e2e/assets/helmapp-template.yaml @@ -0,0 +1,14 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: HelmApp +metadata: + name: {{ .Name }} + {{- if ne .Shard "" }} + labels: + fleet.cattle.io/shard-ref: {{ .Shard }} + {{- end }} + namespace: {{.Namespace}} +spec: + helm: + chart: {{.Chart}} + version: {{.Version}} + namespace: {{.Namespace}} \ No 
newline at end of file diff --git a/e2e/assets/helmapp/helmapp.yaml b/e2e/assets/helmapp/helmapp.yaml new file mode 100644 index 0000000000..38d4c11af7 --- /dev/null +++ b/e2e/assets/helmapp/helmapp.yaml @@ -0,0 +1,14 @@ +apiVersion: fleet.cattle.io/v1alpha1 +kind: HelmApp +metadata: + name: {{.Name}} + namespace: "fleet-local" +spec: + helm: + releaseName: testhelm + repo: {{.Repo}} + chart: {{.Chart}} + version: {{.Version}} + namespace: {{.Namespace}} + helmSecretName: {{.HelmSecretName}} + insecureSkipTLSVerify: {{.InsecureSkipTLSVerify}} diff --git a/e2e/metrics/helmapp_test.go b/e2e/metrics/helmapp_test.go new file mode 100644 index 0000000000..f7333d377f --- /dev/null +++ b/e2e/metrics/helmapp_test.go @@ -0,0 +1,161 @@ +package metrics_test + +import ( + "fmt" + "math/rand" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/rancher/fleet/e2e/metrics" + "github.com/rancher/fleet/e2e/testenv" + "github.com/rancher/fleet/e2e/testenv/kubectl" + "github.com/rancher/fleet/e2e/testenv/zothelper" +) + +var _ = Describe("HelmApp Metrics", Label("helmapp"), func() { + + var ( + // kw is the kubectl command for namespace the workload is deployed to + kw kubectl.Command + namespace string + objName = "metrics" + version = "0.1.0" + ) + + BeforeEach(func() { + k = env.Kubectl.Namespace(env.Namespace) + namespace = testenv.NewNamespaceName( + objName, + rand.New(rand.NewSource(time.Now().UnixNano())), + ) + kw = k.Namespace(namespace) + + out, err := k.Create("ns", namespace) + Expect(err).ToNot(HaveOccurred(), out) + + err = testenv.CreateHelmApp( + kw, + namespace, + objName, + "oci://ghcr.io/rancher/fleet-test-configmap-chart", + version, + shard, + ) + Expect(err).ToNot(HaveOccurred()) + + DeferCleanup(func() { + out, err = k.Delete("ns", namespace) + Expect(err).ToNot(HaveOccurred(), out) + }) + }) + + When("testing HelmApp metrics", func() { + helmappMetricNames := []string{ + "fleet_helmapp_desired_ready_clusters", + 
"fleet_helmapp_ready_clusters", + "fleet_helmapp_resources_desired_ready", + "fleet_helmapp_resources_missing", + "fleet_helmapp_resources_modified", + "fleet_helmapp_resources_not_ready", + "fleet_helmapp_resources_orphaned", + "fleet_helmapp_resources_ready", + "fleet_helmapp_resources_unknown", + "fleet_helmapp_resources_wait_applied", + } + + It("should have exactly one metric of each type for the helmapp", func() { + Eventually(func() error { + metrics, err := etHelmApp.Get() + Expect(err).ToNot(HaveOccurred()) + for _, metricName := range helmappMetricNames { + metric, err := etHelmApp.FindOneMetric( + metrics, + metricName, + map[string]string{ + "name": objName, + "namespace": namespace, + }, + ) + if err != nil { + GinkgoWriter.Printf("ERROR Getting metric: %s: %v\n", metricName, err) + return err + } + Expect(metric.Gauge.GetValue()).To(Equal(float64(0))) + } + return nil + }).ShouldNot(HaveOccurred()) + }) + + When("the HelmApp is changed", func() { + It("it should not duplicate metrics", Label("oci-registry"), func() { + ociRef, err := zothelper.GetOCIReference(k) + Expect(err).ToNot(HaveOccurred(), ociRef) + + chartPath := fmt.Sprintf("%s/sleeper-chart", ociRef) + + out, err := kw.Patch( + "helmapp", objName, + "--type=json", + "-p", fmt.Sprintf(`[{"op": "replace", "path": "/spec/helm/chart", "value": %s}]`, chartPath), + ) + Expect(err).ToNot(HaveOccurred(), out) + Expect(out).To(ContainSubstring("helmapp.fleet.cattle.io/metrics patched")) + + // Wait for it to be changed. + Eventually(func() (string, error) { + return kw.Get("helmapp", objName, "-o", "jsonpath={.spec.helm.chart}") + }).Should(Equal(chartPath)) + + var metric *metrics.Metric + // Expect still no metrics to be duplicated. 
+ Eventually(func() error { + metrics, err := etHelmApp.Get() + Expect(err).ToNot(HaveOccurred()) + for _, metricName := range helmappMetricNames { + metric, err = etHelmApp.FindOneMetric( + metrics, + metricName, + map[string]string{ + "name": objName, + "namespace": namespace, + }, + ) + if err != nil { + return err + } + if metric.LabelValue("chart") != chartPath { + return fmt.Errorf("path for metric %s unchanged", metricName) + } + } + return nil + }).ShouldNot(HaveOccurred()) + }) + + It("should not keep metrics if HelmApp is deleted", Label("helmapp-delete"), func() { + out, err := kw.Delete("helmapp", objName) + Expect(err).ToNot(HaveOccurred(), out) + + Eventually(func() error { + metrics, err := etHelmApp.Get() + Expect(err).ToNot(HaveOccurred()) + for _, metricName := range helmappMetricNames { + _, err := etHelmApp.FindOneMetric( + metrics, + metricName, + map[string]string{ + "name": objName, + "namespace": namespace, + }, + ) + if err == nil { + return fmt.Errorf("metric %s found", metricName) + } + } + return nil + }).ShouldNot(HaveOccurred()) + }) + }) + }) +}) diff --git a/e2e/metrics/suite_test.go b/e2e/metrics/suite_test.go index bebe8890bb..78409d93ff 100644 --- a/e2e/metrics/suite_test.go +++ b/e2e/metrics/suite_test.go @@ -23,10 +23,11 @@ func TestE2E(t *testing.T) { var ( env *testenv.Env // k is the kubectl command for the cluster registration namespace - k kubectl.Command - et metrics.ExporterTest - etGitjob metrics.ExporterTest - shard string + k kubectl.Command + et metrics.ExporterTest + etGitjob metrics.ExporterTest + etHelmApp metrics.ExporterTest + shard string ) type ServiceData struct { @@ -42,7 +43,7 @@ type ServiceData struct { // controller. 
// Valid app values are: fleet-controller, gitjob func setupLoadBalancer(shard string, app string) (metricsURL string) { - Expect(app).To(Or(Equal("fleet-controller"), Equal("gitjob"))) + Expect(app).To(Or(Equal("fleet-controller"), Equal("gitjob"), Equal("helmops"))) rs := rand.NewSource(time.Now().UnixNano()) port := rs.Int63()%1000 + 30000 loadBalancerName := testenv.AddRandomSuffix(app, rs) @@ -101,5 +102,8 @@ var _ = BeforeSuite(func() { gitjobMetricsURL := setupLoadBalancer(shard, "gitjob") etGitjob = metrics.NewExporterTest(gitjobMetricsURL) + helmopsMetricsURL := setupLoadBalancer(shard, "helmops") + etHelmApp = metrics.NewExporterTest(helmopsMetricsURL) + env = testenv.New() }) diff --git a/e2e/single-cluster/gitrepo_test.go b/e2e/single-cluster/gitrepo_test.go index 14102801f7..6989a2d834 100644 --- a/e2e/single-cluster/gitrepo_test.go +++ b/e2e/single-cluster/gitrepo_test.go @@ -143,53 +143,55 @@ var _ = Describe("Monitoring Git repos via HTTP for change", Label("infra-setup" By("updating the gitrepo's status") expectedStatus := fleet.GitRepoStatus{ - Commit: commit, - ReadyClusters: 1, - DesiredReadyClusters: 1, - GitJobStatus: "Current", - Summary: fleet.BundleSummary{ - NotReady: 0, - WaitApplied: 0, - ErrApplied: 0, - OutOfSync: 0, - Modified: 0, - Ready: 1, - Pending: 0, - DesiredReady: 1, - NonReadyResources: []fleet.NonReadyResource(nil), - }, - Display: fleet.GitRepoDisplay{ - ReadyBundleDeployments: "1/1", - // XXX: add state and message? 
- }, - Conditions: []genericcondition.GenericCondition{ - { - Type: "Ready", - Status: "True", + Commit: commit, + GitJobStatus: "Current", + StatusBase: fleet.StatusBase{ + ReadyClusters: 1, + DesiredReadyClusters: 1, + Summary: fleet.BundleSummary{ + NotReady: 0, + WaitApplied: 0, + ErrApplied: 0, + OutOfSync: 0, + Modified: 0, + Ready: 1, + Pending: 0, + DesiredReady: 1, + NonReadyResources: []fleet.NonReadyResource(nil), }, - { - Type: "Accepted", - Status: "True", + Display: fleet.StatusDisplay{ + ReadyBundleDeployments: "1/1", + // XXX: add state and message? }, - { - Type: "Reconciling", - Status: "False", + Conditions: []genericcondition.GenericCondition{ + { + Type: "Ready", + Status: "True", + }, + { + Type: "Accepted", + Status: "True", + }, + { + Type: "Reconciling", + Status: "False", + }, + { + Type: "Stalled", + Status: "False", + }, }, - { - Type: "Stalled", - Status: "False", + ResourceCounts: fleet.ResourceCounts{ + Ready: 1, + DesiredReady: 1, + WaitApplied: 0, + Modified: 0, + Orphaned: 0, + Missing: 0, + Unknown: 0, + NotReady: 0, }, }, - ResourceCounts: fleet.GitRepoResourceCounts{ - Ready: 1, - DesiredReady: 1, - WaitApplied: 0, - Modified: 0, - Orphaned: 0, - Missing: 0, - Unknown: 0, - NotReady: 0, - }, } Eventually(func(g Gomega) { status := getGitRepoStatus(k, gitrepoName) @@ -301,54 +303,56 @@ var _ = Describe("Monitoring Git repos via HTTP for change", Label("infra-setup" By("updating the gitrepo's status") expectedStatus := fleet.GitRepoStatus{ - Commit: commit, - WebhookCommit: commit, - ReadyClusters: 1, - DesiredReadyClusters: 1, - GitJobStatus: "Current", - Summary: fleet.BundleSummary{ - NotReady: 0, - WaitApplied: 0, - ErrApplied: 0, - OutOfSync: 0, - Modified: 0, - Ready: 1, - Pending: 0, - DesiredReady: 1, - NonReadyResources: []fleet.NonReadyResource(nil), - }, - Display: fleet.GitRepoDisplay{ - ReadyBundleDeployments: "1/1", - // XXX: add state and message? 
- }, - Conditions: []genericcondition.GenericCondition{ - { - Type: "Ready", - Status: "True", + Commit: commit, + WebhookCommit: commit, + GitJobStatus: "Current", + StatusBase: fleet.StatusBase{ + ReadyClusters: 1, + DesiredReadyClusters: 1, + Summary: fleet.BundleSummary{ + NotReady: 0, + WaitApplied: 0, + ErrApplied: 0, + OutOfSync: 0, + Modified: 0, + Ready: 1, + Pending: 0, + DesiredReady: 1, + NonReadyResources: []fleet.NonReadyResource(nil), }, - { - Type: "Accepted", - Status: "True", + Display: fleet.StatusDisplay{ + ReadyBundleDeployments: "1/1", + // XXX: add state and message? }, - { - Type: "Reconciling", - Status: "False", + Conditions: []genericcondition.GenericCondition{ + { + Type: "Ready", + Status: "True", + }, + { + Type: "Accepted", + Status: "True", + }, + { + Type: "Reconciling", + Status: "False", + }, + { + Type: "Stalled", + Status: "False", + }, }, - { - Type: "Stalled", - Status: "False", + ResourceCounts: fleet.ResourceCounts{ + Ready: 1, + DesiredReady: 1, + WaitApplied: 0, + Modified: 0, + Orphaned: 0, + Missing: 0, + Unknown: 0, + NotReady: 0, }, }, - ResourceCounts: fleet.GitRepoResourceCounts{ - Ready: 1, - DesiredReady: 1, - WaitApplied: 0, - Modified: 0, - Orphaned: 0, - Missing: 0, - Unknown: 0, - NotReady: 0, - }, } Eventually(func(g Gomega) { status := getGitRepoStatus(k, gitrepoName) diff --git a/e2e/single-cluster/helmapp_test.go b/e2e/single-cluster/helmapp_test.go new file mode 100644 index 0000000000..088e677d74 --- /dev/null +++ b/e2e/single-cluster/helmapp_test.go @@ -0,0 +1,180 @@ +package singlecluster_test + +import ( + "fmt" + "math/rand" + "os" + "strings" + "time" + + "github.com/rancher/fleet/e2e/testenv" + "github.com/rancher/fleet/e2e/testenv/kubectl" + "github.com/rancher/fleet/e2e/testenv/zothelper" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +const ( + helmOpsSecretName = "secret-helmops" +) + +var _ = Describe("HelmApp resource tests", Label("infra-setup", "helm-registry"), func() { + var ( + namespace string + name string + k kubectl.Command + ) + + BeforeEach(func() { + k = env.Kubectl.Namespace(env.Namespace) + }) + + JustBeforeEach(func() { + namespace = testenv.NewNamespaceName( + name, + rand.New(rand.NewSource(time.Now().UnixNano())), + ) + + out, err := k.Create( + "secret", "generic", helmOpsSecretName, + "--from-literal=username="+os.Getenv("CI_OCI_USERNAME"), + "--from-literal=password="+os.Getenv("CI_OCI_PASSWORD"), + ) + Expect(err).ToNot(HaveOccurred(), out) + + err = testenv.ApplyTemplate(k, testenv.AssetPath("helmapp/helmapp.yaml"), struct { + Name string + Namespace string + Repo string + Chart string + HelmSecretName string + InsecureSkipTLSVerify bool + Version string + }{ + name, + namespace, + getChartMuseumExternalAddr(env), + "sleeper-chart", + helmOpsSecretName, + true, + "", + }) + Expect(err).ToNot(HaveOccurred(), out) + }) + + AfterEach(func() { + out, err := k.Delete("helmapp", name) + Expect(err).ToNot(HaveOccurred(), out) + out, err = k.Delete("secret", helmOpsSecretName) + Expect(err).ToNot(HaveOccurred(), out) + }) + + When("applying a helmapp resource", func() { + Context("containing a valid helmapp description", func() { + BeforeEach(func() { + namespace = "helmapp-ns" + name = "basic" + }) + It("deploys the chart", func() { + Eventually(func() bool { + outPods, _ := k.Namespace(namespace).Get("pods") + return strings.Contains(outPods, "sleeper-") + }).Should(BeTrue()) + Eventually(func() bool { + outDeployments, _ := k.Namespace(namespace).Get("deployments") + return strings.Contains(outDeployments, "sleeper") + }).Should(BeTrue()) + }) + }) + }) +}) + +var _ = Describe("HelmApp resource tests with oci registry", Label("infra-setup", "oci-registry"), func() { + var ( + namespace string + name string + insecure bool + k kubectl.Command + 
) + + BeforeEach(func() { + k = env.Kubectl.Namespace(env.Namespace) + }) + + JustBeforeEach(func() { + namespace = testenv.NewNamespaceName( + name, + rand.New(rand.NewSource(time.Now().UnixNano())), + ) + + out, err := k.Create( + "secret", "generic", helmOpsSecretName, + "--from-literal=username="+os.Getenv("CI_OCI_USERNAME"), + "--from-literal=password="+os.Getenv("CI_OCI_PASSWORD"), + ) + Expect(err).ToNot(HaveOccurred(), out) + + ociRef, err := zothelper.GetOCIReference(k) + Expect(err).ToNot(HaveOccurred(), ociRef) + + err = testenv.ApplyTemplate(k, testenv.AssetPath("helmapp/helmapp.yaml"), struct { + Name string + Namespace string + Repo string + Chart string + HelmSecretName string + InsecureSkipTLSVerify bool + Version string + }{ + name, + namespace, + "", + fmt.Sprintf("%s/sleeper-chart", ociRef), + helmOpsSecretName, + insecure, + "0.1.0", + }) + Expect(err).ToNot(HaveOccurred(), out) + }) + + AfterEach(func() { + out, err := k.Delete("helmapp", name) + Expect(err).ToNot(HaveOccurred(), out) + out, err = k.Delete("secret", helmOpsSecretName) + Expect(err).ToNot(HaveOccurred(), out) + }) + + When("applying a helmapp resource", func() { + Context("containing a valid helmapp description pointing to an oci registry and insecure TLS", func() { + BeforeEach(func() { + namespace = "helmapp-ns" + name = "basic-oci" + insecure = true + }) + It("deploys the chart", func() { + Eventually(func() bool { + outPods, _ := k.Namespace(namespace).Get("pods") + return strings.Contains(outPods, "sleeper-") + }).Should(BeTrue()) + Eventually(func() bool { + outDeployments, _ := k.Namespace(namespace).Get("deployments") + return strings.Contains(outDeployments, "sleeper") + }).Should(BeTrue()) + }) + }) + Context("containing a valid helmapp description pointing to an oci registry and not TLS", func() { + BeforeEach(func() { + namespace = "helmapp-ns2" + name = "basic-oci-no-tls" + insecure = false + }) + It("does not deploy the chart because of TLS", func() { + 
Consistently(func() string { + out, _ := k.Namespace(namespace).Get("pods") + return out + }, 5*time.Second, time.Second).ShouldNot(ContainSubstring("sleeper-")) + }) + }) + }) +}) diff --git a/e2e/testenv/template.go b/e2e/testenv/template.go index 0673e7e62b..c9f7a5cd1a 100644 --- a/e2e/testenv/template.go +++ b/e2e/testenv/template.go @@ -15,6 +15,7 @@ import ( ) const gitrepoTemplate = "gitrepo-template.yaml" +const helmappTemplate = "helmapp-template.yaml" const clusterTemplate = "cluster-template.yaml" const clustergroupTemplate = "clustergroup-template.yaml" @@ -30,6 +31,16 @@ type GitRepoData struct { Shard string } +// HelmAppData can be used with the helmapp-template.yaml asset when no custom +// HelmApp properties are required. All fields except Shard are required. +type HelmAppData struct { + Name string + Chart string + Version string + Namespace string + Shard string +} + // CreateGitRepo uses the template to create a gitrepo resource. The namespace // is the TargetNamespace for the workloads. func CreateGitRepo( @@ -49,6 +60,25 @@ func CreateGitRepo( }) } +// CreateHelmApp uses the template to create a HelmApp resource. The namespace +// is the namespace for the workloads. 
+func CreateHelmApp( + k kubectl.Command, + namespace string, + name string, + chart string, + version string, + shard string, +) error { + return ApplyTemplate(k, AssetPath(helmappTemplate), HelmAppData{ + Namespace: namespace, + Name: name, + Chart: chart, + Version: version, + Shard: shard, + }) +} + func CreateCluster( k kubectl.Command, namespace, diff --git a/e2e/testenv/zothelper/zothelper.go b/e2e/testenv/zothelper/zothelper.go new file mode 100644 index 0000000000..a4852fe1fa --- /dev/null +++ b/e2e/testenv/zothelper/zothelper.go @@ -0,0 +1,19 @@ +package zothelper + +import ( + "fmt" + "net" + + "github.com/rancher/fleet/e2e/testenv/kubectl" +) + +func GetOCIReference(k kubectl.Command) (string, error) { + externalIP, err := k.Get("service", "zot-service", "-o", "jsonpath={.status.loadBalancer.ingress[0].ip}") + if err != nil { + return "", err + } + if net.ParseIP(externalIP) == nil { + return "", fmt.Errorf("external ip is not valid") + } + return fmt.Sprintf("oci://%s:8082", externalIP), err +} diff --git a/integrationtests/agent/adoption_test.go b/integrationtests/agent/adoption_test.go index 1b85dd8e9b..57f17b48aa 100644 --- a/integrationtests/agent/adoption_test.go +++ b/integrationtests/agent/adoption_test.go @@ -16,22 +16,6 @@ import ( "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" ) -func init() { - resources["BundleDeploymentConfigMap"] = []v1alpha1.BundleResource{ - { - Name: "configmap.yaml", - Content: `apiVersion: v1 -kind: ConfigMap -metadata: - name: cm1 -data: - key: value -`, - Encoding: "", - }, - } -} - var _ = Describe("Adoption", Label("adopt"), func() { var ( namespace string diff --git a/integrationtests/agent/bundle_deployment_drift_test.go b/integrationtests/agent/bundle_deployment_drift_test.go index 3e687ede6f..2f53e034db 100644 --- a/integrationtests/agent/bundle_deployment_drift_test.go +++ b/integrationtests/agent/bundle_deployment_drift_test.go @@ -3,7 +3,6 @@ package agent_test import ( "context" "fmt" - "os" 
"time" . "github.com/onsi/ginkgo/v2" @@ -21,27 +20,6 @@ import ( "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" ) -func init() { - withStatus, _ := os.ReadFile(assetsPath + "/deployment-with-status.yaml") - withDeployment, _ := os.ReadFile(assetsPath + "/deployment-with-deployment.yaml") - - resources["with-status"] = []v1alpha1.BundleResource{ - { - Name: "deployment-with-status.yaml", - Content: string(withStatus), - Encoding: "", - }, - } - - resources["with-deployment"] = []v1alpha1.BundleResource{ - { - Name: "deployment-with-deployment.yaml", - Content: string(withDeployment), - Encoding: "", - }, - } -} - var _ = Describe("BundleDeployment drift correction", Ordered, func() { const svcName = "svc-test" @@ -280,8 +258,12 @@ var _ = Describe("BundleDeployment drift correction", Ordered, func() { It("Updates the BundleDeployment status as not Ready, including the error message", func() { By("Receiving a modification on a service") - svc, err := env.getService(svcName) - Expect(err).NotTo(HaveOccurred()) + svc := corev1.Service{} + Eventually(func(g Gomega) { + var err error + svc, err = env.getService(svcName) + g.Expect(err).NotTo(HaveOccurred()) + }).Should(Succeed()) patchedSvc := svc.DeepCopy() patchedSvc.Spec.Ports[0].TargetPort = intstr.FromInt(4242) patchedSvc.Spec.Ports[0].Port = 4242 @@ -311,7 +293,7 @@ var _ = Describe("BundleDeployment drift correction", Ordered, func() { nsn := types.NamespacedName{Namespace: clusterNS, Name: name} bd := v1alpha1.BundleDeployment{} - err = k8sClient.Get(ctx, nsn, &bd, &client.GetOptions{}) + err := k8sClient.Get(ctx, nsn, &bd, &client.GetOptions{}) Expect(err).ToNot(HaveOccurred()) patchedBD := bd.DeepCopy() diff --git a/integrationtests/agent/bundle_deployment_status_test.go b/integrationtests/agent/bundle_deployment_status_test.go index 8da98a9da5..bcf6848060 100644 --- a/integrationtests/agent/bundle_deployment_status_test.go +++ b/integrationtests/agent/bundle_deployment_status_test.go @@ -2,7 +2,6 
@@ package agent_test import ( "context" - "os" "time" . "github.com/onsi/ginkgo/v2" @@ -17,26 +16,6 @@ import ( "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" ) -func init() { - v1, _ := os.ReadFile(assetsPath + "/deployment-v1.yaml") - v2, _ := os.ReadFile(assetsPath + "/deployment-v2.yaml") - - resources["v1"] = []v1alpha1.BundleResource{ - { - Name: "deployment-v1.yaml", - Content: string(v1), - Encoding: "", - }, - } - resources["v2"] = []v1alpha1.BundleResource{ - { - Name: "deployment-v2.yaml", - Content: string(v2), - Encoding: "", - }, - } -} - var _ = Describe("BundleDeployment status", Ordered, func() { const ( diff --git a/integrationtests/agent/helm_capabilities_test.go b/integrationtests/agent/helm_capabilities_test.go index 408e2a4642..340e30bbaf 100644 --- a/integrationtests/agent/helm_capabilities_test.go +++ b/integrationtests/agent/helm_capabilities_test.go @@ -14,38 +14,6 @@ import ( "k8s.io/apimachinery/pkg/types" ) -func init() { - resources["capabilitiesv1"] = []v1alpha1.BundleResource{ - { - Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 1.20.0-0'\n", - Name: "config-chart/Chart.yaml", - }, - { - Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n kubeVersion: {{ .Capabilities.KubeVersion.Version }}\n apiVersions: {{ join \", \" .Capabilities.APIVersions | }}\n helmVersion: {{ .Capabilities.HelmVersion.Version }}\n", - Name: "config-chart/templates/configmap.yaml", - }, - { - Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", - Name: "fleet.yaml", - }, - } - - resources["capabilitiesv2"] = []v1alpha1.BundleResource{ - { - Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: 
'>= 920.920.0-0'\n", - Name: "config-chart/Chart.yaml", - }, - { - Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n", - Name: "config-chart/templates/configmap.yaml", - }, - { - Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", - Name: "fleet.yaml", - }, - } -} - var _ = Describe("Helm Chart uses Capabilities", Ordered, func() { var ( diff --git a/integrationtests/agent/suite_test.go b/integrationtests/agent/suite_test.go index c27d903c88..a4fb81e15a 100644 --- a/integrationtests/agent/suite_test.go +++ b/integrationtests/agent/suite_test.go @@ -107,6 +107,8 @@ var _ = BeforeSuite(func() { }) Expect(err).ToNot(HaveOccurred()) + setupFakeContents() + driftChan := make(chan event.GenericEvent) // Set up the bundledeployment reconciler @@ -269,7 +271,10 @@ type specEnv struct { func (se specEnv) isNotReadyAndModified(name string, modifiedStatus v1alpha1.ModifiedStatus, message string) (bool, string) { bd := &v1alpha1.BundleDeployment{} err := k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: clusterNS, Name: name}, bd, &client.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return false, err.Error() + } + isReadyCondition := checkCondition(bd.Status.Conditions, "Ready", "False", message) isOK := cmp.Equal(bd.Status.ModifiedStatus, []v1alpha1.ModifiedStatus{modifiedStatus}) && @@ -286,7 +291,10 @@ func (se specEnv) isNotReadyAndModified(name string, modifiedStatus v1alpha1.Mod func (se specEnv) isBundleDeploymentReadyAndNotModified(name string) bool { bd := &v1alpha1.BundleDeployment{} err := k8sClient.Get(context.TODO(), types.NamespacedName{Namespace: clusterNS, Name: name}, bd, &client.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + if err != nil { + return false + } + return bd.Status.Ready && bd.Status.NonModified } @@ -337,3 +345,82 @@ func createNamespace() string { return namespace } + +func 
setupFakeContents() { + withStatus, _ := os.ReadFile(assetsPath + "/deployment-with-status.yaml") + withDeployment, _ := os.ReadFile(assetsPath + "/deployment-with-deployment.yaml") + v1, _ := os.ReadFile(assetsPath + "/deployment-v1.yaml") + v2, _ := os.ReadFile(assetsPath + "/deployment-v2.yaml") + + resources = map[string][]v1alpha1.BundleResource{ + "with-status": []v1alpha1.BundleResource{ + { + Name: "deployment-with-status.yaml", + Content: string(withStatus), + Encoding: "", + }, + }, + "with-deployment": []v1alpha1.BundleResource{ + { + Name: "deployment-with-deployment.yaml", + Content: string(withDeployment), + Encoding: "", + }, + }, + "BundleDeploymentConfigMap": []v1alpha1.BundleResource{ + { + Name: "configmap.yaml", + Content: `apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 +data: + key: value +`, + Encoding: "", + }, + }, + "v1": []v1alpha1.BundleResource{ + { + Name: "deployment-v1.yaml", + Content: string(v1), + Encoding: "", + }, + }, + "v2": []v1alpha1.BundleResource{ + { + Name: "deployment-v2.yaml", + Content: string(v2), + Encoding: "", + }, + }, + "capabilitiesv1": []v1alpha1.BundleResource{ + { + Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 1.20.0-0'\n", + Name: "config-chart/Chart.yaml", + }, + { + Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n kubeVersion: {{ .Capabilities.KubeVersion.Version }}\n apiVersions: {{ join \", \" .Capabilities.APIVersions | }}\n helmVersion: {{ .Capabilities.HelmVersion.Version }}\n", + Name: "config-chart/templates/configmap.yaml", + }, + { + Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", + Name: "fleet.yaml", + }, + }, + "capabilitiesv2": []v1alpha1.BundleResource{ + { + Content: "apiVersion: v2\nname: config-chart\ndescription: A test chart that 
verifies its config\ntype: application\nversion: 0.1.0\nappVersion: \"1.16.0\"\nkubeVersion: '>= 920.920.0-0'\n", + Name: "config-chart/Chart.yaml", + }, + { + Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: test-simple-chart-config\ndata:\n test: \"value123\"\n name: {{ .Values.name }}\n", + Name: "config-chart/templates/configmap.yaml", + }, + { + Content: "helm:\n chart: config-chart\n values:\n name: example-value\n", + Name: "fleet.yaml", + }, + }, + } +} diff --git a/integrationtests/controller/bundle/bundle_helm_test.go b/integrationtests/controller/bundle/bundle_helm_test.go new file mode 100644 index 0000000000..cde9a0a65a --- /dev/null +++ b/integrationtests/controller/bundle/bundle_helm_test.go @@ -0,0 +1,275 @@ +package bundle + +import ( + "crypto/rand" + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/rancher/fleet/integrationtests/utils" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +var _ = Describe("Bundle with helm options", Ordered, func() { + BeforeAll(func() { + var err error + os.Setenv("EXPERIMENTAL_HELM_OPS", "true") + namespace, err = utils.NewNamespaceName() + Expect(err).ToNot(HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + Expect(k8sClient.Create(ctx, ns)).ToNot(HaveOccurred()) + + createClustersAndClusterGroups() + }) + + var ( + targets []v1alpha1.BundleTarget + targetRestrictions []v1alpha1.BundleTarget + bundleName string + bdLabels map[string]string + expectedNumberOfBundleDeployments int + helmOptions *v1alpha1.BundleHelmOptions + ) + + JustBeforeEach(func() { + bundle, err := utils.CreateHelmBundle(ctx, k8sClient, bundleName, 
namespace, targets, targetRestrictions, helmOptions) + Expect(err).NotTo(HaveOccurred()) + Expect(bundle).To(Not(BeNil())) + + // create secret (if helmOptions != nil) + err = createHelmSecret(k8sClient, helmOptions, namespace) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + Expect(k8sClient.Delete(ctx, &v1alpha1.Bundle{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: bundleName}})).NotTo(HaveOccurred()) + bdList := &v1alpha1.BundleDeploymentList{} + err := k8sClient.List(ctx, bdList, client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(bdLabels)}) + Expect(err).NotTo(HaveOccurred()) + for _, bd := range bdList.Items { + err := k8sClient.Delete(ctx, &bd) + // BundleDeployments are now deleted in a loop by the controller, hence this delete operation + // should not be necessary. Pending further tests, we choose to ignore errors indicating that the bundle + // deployment has already been deleted here. + Expect(client.IgnoreNotFound(err)).NotTo(HaveOccurred()) + } + // delete secret (if helmOptions != nil) + err = deleteHelmSecret(k8sClient, helmOptions, namespace) + Expect(err).NotTo(HaveOccurred()) + }) + + When("helm options is NOT nil, and has no values", func() { + BeforeEach(func() { + helmOptions = &v1alpha1.BundleHelmOptions{} + bundleName = "helm-not-nil-and-no-values" + bdLabels = map[string]string{ + "fleet.cattle.io/bundle-name": bundleName, + "fleet.cattle.io/bundle-namespace": namespace, + } + expectedNumberOfBundleDeployments = 3 + // simulate targets. 
All targets are also added to targetRestrictions, which acts as a white list + targets = []v1alpha1.BundleTarget{ + { + ClusterGroup: "all", + }, + } + targetRestrictions = make([]v1alpha1.BundleTarget, len(targets)) + copy(targetRestrictions, targets) + }) + + It("creates three BundleDeployments with the expected helm options information", func() { + var bdList = verifyHelmBundlesDeploymentsAreCreated(expectedNumberOfBundleDeployments, bdLabels, bundleName, helmOptions) + By("not propagating helm values to BundleDeployments") + for _, bd := range bdList.Items { + Expect(bd.Spec.Options.Helm.Values).To(BeNil()) + } + }) + }) + + When("helm options is NOT nil, and has values", func() { + BeforeEach(func() { + helmOptions = &v1alpha1.BundleHelmOptions{ + SecretName: "supersecret", + InsecureSkipTLSverify: true, + } + bundleName = "helm-not-nil-and-values" + bdLabels = map[string]string{ + "fleet.cattle.io/bundle-name": bundleName, + "fleet.cattle.io/bundle-namespace": namespace, + } + expectedNumberOfBundleDeployments = 3 + // simulate targets. All targets are also added to targetRestrictions, which acts as a white list + targets = []v1alpha1.BundleTarget{ + { + ClusterGroup: "all", + }, + } + targetRestrictions = make([]v1alpha1.BundleTarget, len(targets)) + copy(targetRestrictions, targets) + }) + + It("creates three BundleDeployments with the expected helm options information", func() { + var bdList = verifyHelmBundlesDeploymentsAreCreated(expectedNumberOfBundleDeployments, bdLabels, bundleName, helmOptions) + By("and BundleDeployments have the expected values") + for _, bd := range bdList.Items { + Expect(bd.Spec.Options.Helm.Values).To(BeNil()) + } + }) + }) + + When("helm options is nil", func() { + BeforeEach(func() { + helmOptions = nil + bundleName = "helm-nil" + bdLabels = map[string]string{ + "fleet.cattle.io/bundle-name": bundleName, + "fleet.cattle.io/bundle-namespace": namespace, + } + expectedNumberOfBundleDeployments = 3 + // simulate targets. 
All targets are also added to targetRestrictions, which acts as a white list + targets = []v1alpha1.BundleTarget{ + { + ClusterGroup: "all", + }, + } + targetRestrictions = make([]v1alpha1.BundleTarget, len(targets)) + copy(targetRestrictions, targets) + }) + + It("creates three BundleDeployments with no helm options information", func() { + var bdList = verifyHelmBundlesDeploymentsAreCreated(expectedNumberOfBundleDeployments, bdLabels, bundleName, helmOptions) + By("not propagating helm values to BundleDeployments") + for _, bd := range bdList.Items { + Expect(bd.Spec.Options.Helm.Values).To(BeNil()) + } + }) + }) +}) + +func verifyHelmBundlesDeploymentsAreCreated( + numBundleDeployments int, + bdLabels map[string]string, + bundleName string, + helmOptions *v1alpha1.BundleHelmOptions) *v1alpha1.BundleDeploymentList { + var bdList *v1alpha1.BundleDeploymentList + bdLabels["fleet.cattle.io/bundle-name"] = bundleName + + Eventually(func(g Gomega) { + // check bundle exists + b := &v1alpha1.Bundle{} + err := k8sClient.Get(ctx, types.NamespacedName{Namespace: namespace, Name: bundleName}, b) + g.Expect(err).NotTo(HaveOccurred()) + + bdList = &v1alpha1.BundleDeploymentList{} + err = k8sClient.List(ctx, bdList, client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(bdLabels)}) + Expect(err).NotTo(HaveOccurred()) + + g.Expect(len(bdList.Items)).To(Equal(numBundleDeployments)) + for _, bd := range bdList.Items { + // all bds should have the expected helm options + g.Expect(bd.Spec.HelmChartOptions).To(Equal(helmOptions)) + + // if helmOptions.SecretName != "" it should also create + // a secret in the bundle deployment namespace that contains + // the same data as in the bundle namespace + checkBundleDeploymentSecret(k8sClient, helmOptions, bundleName, namespace, bd.Namespace) + + // the bundle deployment should have the expected finalizer + g.Expect(controllerutil.ContainsFinalizer(&bd, "fleet.cattle.io/bundle-deployment-finalizer")).To(BeTrue()) + } + 
}).Should(Succeed()) + + return bdList +} + +func getRandBytes(size int) ([]byte, error) { + buf := make([]byte, size) + // then we can call rand.Read. + _, err := rand.Read(buf) + + return buf, err +} + +func createHelmSecret(c client.Client, helmOptions *v1alpha1.BundleHelmOptions, ns string) error { + if helmOptions == nil || helmOptions.SecretName == "" { + return nil + } + username, err := getRandBytes(10) + if err != nil { + return err + } + + password, err := getRandBytes(10) + if err != nil { + return err + } + + certs, err := getRandBytes(20) + if err != nil { + return err + } + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: helmOptions.SecretName, + Namespace: ns, + }, + Data: map[string][]byte{v1.BasicAuthUsernameKey: username, v1.BasicAuthPasswordKey: password, "cacerts": certs}, + Type: v1.SecretTypeBasicAuth, + } + + return c.Create(ctx, secret) +} + +func deleteHelmSecret(c client.Client, helmOptions *v1alpha1.BundleHelmOptions, ns string) error { + if helmOptions == nil || helmOptions.SecretName == "" { + return nil + } + nsName := types.NamespacedName{Namespace: ns, Name: helmOptions.SecretName} + secret := &v1.Secret{} + err := c.Get(ctx, nsName, secret) + if err != nil { + return err + } + + return c.Delete(ctx, secret) +} + +func checkBundleDeploymentSecret(c client.Client, helmOptions *v1alpha1.BundleHelmOptions, bundleName, bNamespace, bdNamespace string) { + if helmOptions == nil || helmOptions.SecretName == "" { + // nothing to check + return + } + + // get the secret for the bundle + nsName := types.NamespacedName{Namespace: bNamespace, Name: helmOptions.SecretName} + bundleSecret := &v1.Secret{} + err := c.Get(ctx, nsName, bundleSecret) + Expect(err).NotTo(HaveOccurred()) + + // get the secret for the bundle deployment + bdNsName := types.NamespacedName{Namespace: bdNamespace, Name: helmOptions.SecretName} + bdSecret := &v1.Secret{} + err = c.Get(ctx, bdNsName, bdSecret) + Expect(err).NotTo(HaveOccurred()) + + // both 
secrets have the same data + Expect(bdSecret.Data).To(Equal(bundleSecret.Data)) + + // check that the controller reference is set in the bundle deployment secret + controller := metav1.GetControllerOf(bdSecret) + Expect(controller).ToNot(BeNil()) + + Expect(controller.Name).To(Equal(bundleName)) + Expect(controller.Kind).To(Equal("BundleDeployment")) + Expect(controller.APIVersion).To(Equal("fleet.cattle.io/v1alpha1")) +} diff --git a/integrationtests/helmops/controller/assets/root.crt b/integrationtests/helmops/controller/assets/root.crt new file mode 100644 index 0000000000..e1c49c41d6 --- /dev/null +++ b/integrationtests/helmops/controller/assets/root.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- + +MIIFyTCCA7GgAwIBAgIUT/t05vkxjiRxkUAxVpyYUEWGh1UwDQYJKoZIhvcNAQEL +BQAwdDELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZsZWV0bGFuZDESMBAGA1UEBwwJ +RmxlZXRjaXR5MRAwDgYDVQQKDAdSYW5jaGVyMQ4wDAYDVQQLDAVGbGVldDEbMBkG +A1UEAwwSRmxlZXQtVGVzdCBSb290IENBMB4XDTI0MTExNTE2NTMzNVoXDTI1MTEx +NTE2NTMzNVowdDELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZsZWV0bGFuZDESMBAG +A1UEBwwJRmxlZXRjaXR5MRAwDgYDVQQKDAdSYW5jaGVyMQ4wDAYDVQQLDAVGbGVl +dDEbMBkGA1UEAwwSRmxlZXQtVGVzdCBSb290IENBMIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEAyTdT49R5T5w3DENcAkAj7g6TY8WeYG/QwcUA4daXPz5V +79W+Yaa6oOJCPWFGAeKTZC1zI421AEVSFMVoI0g+nyWOLx/YvlKwsc8m+Nkk8Iyy +XtFzHhFt8bs4gOf8rfXPVBSXQFl69Po7wDqOaxxiwS8OHPZ6VTcGZ+RNYXrxPRZn +HCcEJsvWZJvj8OY9ZEcNxc/3Oz7OfDsjFtUbRGT5idSTwUc5ihTN+7BqRj/TEYcz +fIE1ipdKNsZR/EajetH6aIzOuk0YyZzpaS3y/ae8vaizLGGy7OyDnqieuXSXr5Bd +of42RFbaKKTyvK2h43r3sJVSHFxqmIIpMbYNwHyFvARCtglCBoZu58F5hb35S5Ve +7JttamE1Xuw6jmk7tP9dajNK2luPeKT2FXWNaVXBO+j1cqaUFMm4R6LovqTJMZfK +XEv02R3GxUFnKJq7A3bkeGO0F6mdfKtoAiXorgJajyBUDLSih6fPawBYmV8XtHmM +pVBfXTzzY2FWAgm2c+o9ak3jgGjOQ87M30PO6hkY5tDZAJZy0lbgfL0/A7w35Udk +poAvfLyCpljWsXmOR2A2T00gqg2mueCvOwcDE6q/ExXXgjEPTDvvnW03CoW3bXKU +9AdHVGwOslVP8uPG729n27D5qXf4XEkUjPzaO21ACzAqwp5q7lEJI4413sd0uicC +AwEAAaNTMFEwHQYDVR0OBBYEFFNxX1oCze1k4uRjcKqKgOnNxyrOMB8GA1UdIwQY 
+MBaAFFNxX1oCze1k4uRjcKqKgOnNxyrOMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggIBAB+PgYWFSdb3lo5x+IiRJf26JJbT7o2Mmybm/pD7vppgOV0z +pigrP2+v0+DyTe18YJvptUxI1jx0mHAQylxvbQrVV+QG2NoSQxnmSoLuN0+yTr61 +UMA6eGOqcr3zb45AWz9vUnHl6guhuLa7vtkDlERjHua13pmC6WmTp1fsNEcAUnyk +tlsCc2T5BrsouDgpSyTgco9ZyjOi6mlMSWiDGRYEdNK1DMVt4vEiLmADfXiqmMIA +SccPu1yCBJ1Q6lcHRDFl0PFNrJGQlv31Qh834Vj6+7B2nw/vJW+kbEwjXc9Etmee +hXGu+weG6mQ+CfWmaKdR44jdSyHTqMYhtH3LGW6hpYF7bHykyg+jDAZ/RLkEihas +EtHqDTftRKhzkKeRHsvFR8T9ErNidy50qeAQMRdB8urtQd3XpPka24VcanUWb9kB +WJfJpJK+lnbcFFzuUBxfO5q5l5Ax298VSY58XdD5GGyMWRjudcMCwTKFEyHMwV2j +lcrq/ZyAHjqZQ69yKNEHu3fltUtp0dHFKnwc5wLB1ggkFwzHflExs0h+VR3iGx++ +BkdtgTx+yTWMgJjOnS1Hg9k69AtvbEBhCj5lw02X7lMYZgRA98c9Eqtmkk4k1rVq +MhhKVUiQz7YgIUJO9951exJQpef4j0sIMWooR6VzU+9GjcaDMKsa0D/S4ol7 +-----END CERTIFICATE----- \ No newline at end of file diff --git a/integrationtests/helmops/controller/assets/server.crt b/integrationtests/helmops/controller/assets/server.crt new file mode 100644 index 0000000000..5ed98009b5 --- /dev/null +++ b/integrationtests/helmops/controller/assets/server.crt @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF5DCCA8ygAwIBAgIUIiKh1eJlLWS2dLQMnsmnElm/yaYwDQYJKoZIhvcNAQEL +BQAwdDELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZsZWV0bGFuZDESMBAGA1UEBwwJ +RmxlZXRjaXR5MRAwDgYDVQQKDAdSYW5jaGVyMQ4wDAYDVQQLDAVGbGVldDEbMBkG +A1UEAwwSRmxlZXQtVGVzdCBSb290IENBMB4XDTI0MTExNTE2NTMzNloXDTI1MTEx +NTE2NTMzNlowbDELMAkGA1UEBhMCREUxEjAQBgNVBAgMCUZsZWV0bGFuZDESMBAG +A1UEBwwJRmxlZXRjaXR5MRAwDgYDVQQKDAdSYW5jaGVyMQ4wDAYDVQQLDAVGbGVl +dDETMBEGA1UEAwwKRmxlZXQtVGVzdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCC +AgoCggIBAL4epFutz0JWEi06EdhbtgR06dsdSk+Xuy2f5OdUUZINpcPx7pMWJhNb +fLUWLrTgaRWQ3rwY1Xt+bSHy2viUvi6nBj/g6FF1quGu4lJ8NwIZuwio+9zVfByZ +ru8AoYvjbLxjhnXjLVsu1Xr3QMutszmiENiWDqb1ywhQxQnbISkm0dq2mPwm3ZK8 +F8mT0FkdLFlnyEGvPcm1n+Lq5qIMZ9jEM+n2mjCUagWOJJ5h1l0ISp/bUtWZwdf4 +FrXbhBSPcXrRMwLeI0Xbr4OU4BT6UW4CwBm70ku58ac+L9Jym9SJuJURguvZeli3 +h1dEc3czPoIWVp6D8iIK+g6SXZwQHmRex9LDyZoL7DD2ky40I+ZAkuliNdZljqKK 
+Y+CpoTIQWxfDnfahnVV+KTxochWLlyvX7miPl53nY1ofTk3H0MitCHvVFjZz5qmS +UWDD52IapYK0M6tJy4XsIQaKu0UxMYFpKZGuBd0sQRZkqmKT1ZsNxyAwdjXD6mME +E8YTc+g0lZEOxP7xlWqV90s9PfFKl0POs4999TK4uCBNPNPnF07SR0uOk7Z2lqly +zZIr7/MOoSCYya8sSDiz6FCEW08s6Y1WrgMEVNiGa5i7+eh1gMm/YtNSNbdR6FR/ +Zom40xCj+B5jFOWaVdcxLWW+HC7uATs19Dw7K0Hb1Bk06ZhZbK1rAgMBAAGjdjB0 +MB8GA1UdIwQYMBaAFFNxX1oCze1k4uRjcKqKgOnNxyrOMAkGA1UdEwQCMAAwCwYD +VR0PBAQDAgTwMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAdBgNVHQ4EFgQU +CHbpVasvuPmUIyR52wPCoZC7fn0wDQYJKoZIhvcNAQELBQADggIBAKpQR56axPeF +Pf7R+e6LW03EY0AAyQuIXoUEzvzYM8GC2egTqKfnrSSh/oQm1KP4iOAljBYXKKU5 +/H9qunKw9AjKR6NhAEGYPztwkAS+pp/f3H3GErEMmctMtEyUOWSKTcpAiw7ncOkn +HIPIHAd4y1lJfFsDkWCi7hC34SNkdSUChFEdRTiIjhrcUDVmgn95Lvhhqs9mAibY +s2tGKfspNbtmaukRCIFIKx5Mm++C6I4cC8Ws38qDQVok2M+FhVLVKCGOaBkp/dWZ +jswKcluH/tPw7vFhEEu1yk8Wssr7CMOso06/qJHmJkLuK3jvnTaYp2nRgnHlu8pa +UkNyLZiObX1xpycdinyO5PjWw5S4agHZknOYik+6+i0GSuGWBD4koXYSYvsgl8Db +iFdMu/IVGmcmKipx+yNcwLpVHrVUoPTd2snsi5lz6r0sBS5GSogOYbYGbGrK4QrR +m+VTg8L/1479gL254C649AsEvNhZo2NPex1PqEHnkHLP8b8Ysv+VknNELrE1X0Sl +tgvwYOw3OkeXIHHxxh9fNYEz4m+khKLan34C45T2GRBnlgjIF/OCOhNZxmaXucqe +nBar03VBIqh5MOmAErmYQlclQKZoWbf6x+VGd044Z33S7ztf40uH9lnN2laZADdz +W2Oc3GFDMzAjvzqw8bFnqwvfF+rQuJuI +-----END CERTIFICATE----- \ No newline at end of file diff --git a/integrationtests/helmops/controller/assets/server.key b/integrationtests/helmops/controller/assets/server.key new file mode 100644 index 0000000000..83eecdfa49 --- /dev/null +++ b/integrationtests/helmops/controller/assets/server.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC+HqRbrc9CVhIt +OhHYW7YEdOnbHUpPl7stn+TnVFGSDaXD8e6TFiYTW3y1Fi604GkVkN68GNV7fm0h +8tr4lL4upwY/4OhRdarhruJSfDcCGbsIqPvc1Xwcma7vAKGL42y8Y4Z14y1bLtV6 +90DLrbM5ohDYlg6m9csIUMUJ2yEpJtHatpj8Jt2SvBfJk9BZHSxZZ8hBrz3JtZ/i +6uaiDGfYxDPp9powlGoFjiSeYdZdCEqf21LVmcHX+Ba124QUj3F60TMC3iNF26+D +lOAU+lFuAsAZu9JLufGnPi/ScpvUibiVEYLr2XpYt4dXRHN3Mz6CFlaeg/IiCvoO 
+kl2cEB5kXsfSw8maC+ww9pMuNCPmQJLpYjXWZY6iimPgqaEyEFsXw532oZ1Vfik8 +aHIVi5cr1+5oj5ed52NaH05Nx9DIrQh71RY2c+apklFgw+diGqWCtDOrScuF7CEG +irtFMTGBaSmRrgXdLEEWZKpik9WbDccgMHY1w+pjBBPGE3PoNJWRDsT+8ZVqlfdL +PT3xSpdDzrOPffUyuLggTTzT5xdO0kdLjpO2dpapcs2SK+/zDqEgmMmvLEg4s+hQ +hFtPLOmNVq4DBFTYhmuYu/nodYDJv2LTUjW3UehUf2aJuNMQo/geYxTlmlXXMS1l +vhwu7gE7NfQ8OytB29QZNOmYWWytawIDAQABAoICACyX611Fq3OX1LOfB0iEWnE5 +KxEmEaQRpunQs1Q/RtLHOLZ5LMh7TXsE3n9rMJFkgcF5NYVRHeHViauI1yuvV9yB +eMnK6zMQMoC1EIjgcdagSmqBmHH38SCUO5/7ueih84NMpOFJ4/2bQp+RFzWvDHbc +OK9UoyMuS+0rZMwnBeQtItP2OHQMebRNQhcaAKimWxytZx9hB1EktNf42RfxaPpD +KxoZqZjzdtrOuHAd6rXvl/Fe9FL9uaX6nvkRAC4CZ0+zeg+WIxfjq4tlhBnnjOoM +4xomH/F7L99WiskF8N8tXoo4jUjcvgHJKomhmKPA9Ux2COMtd8HcaUK5uhM9BKOG +obS7ZWbrMrOSyY5zXjAvZMt8pJ1Npgq+u5VaLyrIBZaguuWS6+p5Et1q20znPp4o +/3ezXXjQ+t06NX1B/hO1OfzvjydnX44DJ7giGlvb6qmzDkMxfy4w5Niu6hvZ2DDP +Hg+m+M/bk6ZAEhgnXF3UsvfLkhzaaToF23gmbZqho4T5ZjBJhtGMvR7MrAEXMuZD +2M7fMFZy+ugZn0cBB9makyZ8cq5MKqTUkSmySj/PIm2ae03m19SnU/4LS3YyZrDU +5777gAks/6lrqHEdAMSFvZr8ReByn8/uEAEpaACtKz/Rxkio6Vc3LxUYzKM4uDGe +0gYqY2reBNt5zlJsG+9hAoIBAQDpBOFZ7iNesWuv/e01soQyiAHRQPnylxzfWSGt +1QS/tGuma4GgxPY9pKBeGsBUQKZ8nJVOJDMP3aA3Xz+88MRVLLvnpffITQo4ohiJ +EHoBtVhkA90lnudccgOojzOs0HPFk9Oxm4GV6J4OPc6foAjKBYklAjcBw+iAFHMk +KzqTFxky/qle4ZTswaMV7rbUFT310p/VzH7rFzcRQ76azx/i2J54UspRhchywvfD +6ca4eKtYeaSh98sffd9mFUthaJUYax6qlefclxBKGXnPNLTVgKHDUo1/OsgLJgyK +56WNAFUsXdlJLFi2b3LSdfUuaJoI/LFefn8zAVb+UBbmglutAoIBAQDQ3qpRPa59 +X4/1j5IuhyxnW0KrzO0ywuPZKslHh51Ox5o5BEE2pR9JFsmCb3OM0ACqsnit4UMI +ORUmcJAEKHchvTRHCJwWqsk0r0tDhkUvz8N/jOUgmqwEJFwmrA5A9uRZdi/ySEVD +0Xp5Uw+RAuykmb8JsHljH4seL3Pxpx7BTMv6O0gOFnCMHwP8OtCgJGeoiCkDGeL6 +TeywfgkaNPRXNJIMTv+/qa0e2c21bmPev1PtwxQHyYq0UD1Nz66ycSDReFGccYXL +2ziEw6RRIeOkxM3wIV6Sykib9I+svAVIdrysUklIZvoskJRldWWO6SCxRP+yez6s +Bn5J40+UplB3AoIBADEw2Y3NiuPzmmMlvMzIKcYtFg0hpWJD6lFwFH8I6B68LLmO +GmhhDAaJWV1kUlO27i6CM7ayR6FCzQ7DacYuIZRFhElrrPo44T6BYaKVutvfd5Bt +jGLjv72xR/pueJ8zxizgfyEQTfPijnM9MwBZnWFgd8o7RHd37v4S0xfAlHX2u1gb 
+kI+6GWE9o3r+0NPGxDS/yQQuTmC8nuBjJ7qwnO+bgSCvgYxiLKWlaP6PvGa2+p0L +2OhkUhoMzXtUZXxjwo3MF0Y1rSPRNBwgcql+W+pyZDPCmqJQO2i3GJC+RCGW/2QF +T9h1pyikMF4jjqXEaTgaeCsVky6mSsIXEC6LOGUCggEACFCk9SEAfks8nuj9R87n +zKGMcOxykO/DRFT4uFlEwOsfT5/EvNkr+qvmj8PCFNv++syqEzoBgiVLm0El6pR1 +0akHmMBV/m0EH43O8Dw7KuEZhk1knbyqlmugI4X790gc5RbYZ8vKvh1rw8KzvvEf +3JmmSkt1OaX60tPOyNL/XXCiOi77+luYVWuyq+rnfUiVu9bX0yDHsXFCt+/8iseK +5qHYIpdOhSHLG4xOLSfc3/Q78h4vAPRcCjubhSp8aOwqA0zH6vN2ARyUDmz/cJ9p +wZh4HlQlwLA+3b6JrbW6fB0F+9I2yqQW14lV7wgSZ/MN8yCtETzozM5hXq2m8GMC +lwKCAQEAoQZZdUNRaT0eA3ddxLGXxMILzl3VflkbUtmsIsMq2Pp/bOrAEWybuTu2 +Rsb5hhrOh7Qce8ety9ErWCXtZm48Rt3GLGjKvrK8QIOVq6nJ08ON2DJhR4A1tyVx +aN+XbBz79VVmQYGuKOE4SPNA40mjr7RURSk2frGvHlF0tEC2CnJGA1Y+vjQR1wIz +sQUvpBPqs6d2k9agjErzjrjoL8kj9JA0u6afQwYTbTOBqJgh7OZANOox6ATIIYkv +yAWhx4Od0gZjkCefeTOiFnfN9fjG8GeogDF5SC2WkHuIXmbYanc63qKUZdDR2xgT +k5tkVFvh2ekQpg40bPvflxarwgpLBg== +-----END PRIVATE KEY----- \ No newline at end of file diff --git a/integrationtests/helmops/controller/controller_test.go b/integrationtests/helmops/controller/controller_test.go new file mode 100644 index 0000000000..65515a1c40 --- /dev/null +++ b/integrationtests/helmops/controller/controller_test.go @@ -0,0 +1,971 @@ +package controller + +import ( + "crypto/sha256" + "crypto/subtle" + "crypto/tls" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "github.com/rancher/fleet/e2e/testenv" + "github.com/rancher/fleet/internal/cmd/controller/finalize" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/wrangler/v3/pkg/genericcondition" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" +) + +var letters = []rune("abcdefghijklmnopqrstuvwxyz") + +const ( + maxLabelsLength = 5 + maxGenericStringLength = 10 + authUsername = "superuser" + authPassword = "superpassword" + helmRepoIndex = `apiVersion: v1 +entries: + alpine: + - created: 2016-10-06T16:23:20.499814565-06:00 + description: Deploy a basic Alpine Linux pod + digest: 99c76e403d752c84ead610644d4b1c2f2b453a74b921f422b9dcb8a7c8b559cd + home: https://helm.sh/helm + name: alpine + sources: + - https://github.com/helm/helm + urls: + - https://technosophos.github.io/tscharts/alpine-0.2.0.tgz + version: 0.2.0 + - created: 2016-10-06T16:23:20.499543808-06:00 + description: Deploy a basic Alpine Linux pod + digest: 515c58e5f79d8b2913a10cb400ebb6fa9c77fe813287afbacf1a0b897cd78727 + home: https://helm.sh/helm + name: alpine + sources: + - https://github.com/helm/helm + urls: + - https://technosophos.github.io/tscharts/alpine-0.1.0.tgz + version: 0.1.0 + nginx: + - created: 2016-10-06T16:23:20.499543808-06:00 + description: Create a basic nginx HTTP server + digest: aaff4545f79d8b2913a10cb400ebb6fa9c77fe813287afbacf1a0b897cdffffff + home: https://helm.sh/helm + name: nginx + sources: + - https://github.com/helm/charts + urls: + - https://technosophos.github.io/tscharts/nginx-1.1.0.tgz + version: 1.1.0 +generated: 2016-10-06T16:23:20.499029981-06:00` +) + +func randBool() bool { + r 
:= rand.New(rand.NewSource(time.Now().UnixNano())) + return r.Intn(2) == 1 +} + +func randString() string { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + b := make([]rune, maxGenericStringLength) + for i := range b { + b[i] = letters[r.Intn(len(letters))] + } + return string(b) +} + +func randStringSlice() []string { + n := rand.Intn(maxLabelsLength) + r := make([]string, n) + for i := range r { + r[i] = randString() + } + return r +} + +func randInterfaceMap() map[string]interface{} { + nbItems := rand.Intn(maxLabelsLength) + items := make(map[string]interface{}) + for range nbItems { + items[randString()] = randString() + } + return items +} + +func randStringMap() map[string]string { + m := randInterfaceMap() + labels := make(map[string]string) + for k, v := range m { + s, ok := v.(string) + if ok { + labels[k] = s + } + } + return labels +} + +func randHelmOptions() *fleet.HelmOptions { + // we always have helm options in HelmApp resources + h := &fleet.HelmOptions{ + Chart: randString(), + Repo: randString(), + ReleaseName: randString(), + Version: randString(), // return also semver version? 
+ TimeoutSeconds: rand.Intn(3), + Values: &fleet.GenericMap{Data: randInterfaceMap()}, + Force: randBool(), + TakeOwnership: randBool(), + MaxHistory: rand.Intn(4), + ValuesFiles: randStringSlice(), + WaitForJobs: randBool(), + Atomic: randBool(), + DisablePreProcess: randBool(), + DisableDNS: randBool(), + SkipSchemaValidation: randBool(), + DisableDependencyUpdate: randBool(), + } + + return h +} + +func randKustomizeOptions() *fleet.KustomizeOptions { + if randBool() { + return nil + } + o := &fleet.KustomizeOptions{} + o.Dir = randString() + return o +} + +func randBundleDeploymentOptions() fleet.BundleDeploymentOptions { + o := fleet.BundleDeploymentOptions{ + DefaultNamespace: randString(), + TargetNamespace: randString(), + Kustomize: randKustomizeOptions(), + Helm: randHelmOptions(), + CorrectDrift: randCorrectDrift(), + ServiceAccount: randString(), + } + return o +} + +func randCorrectDrift() *fleet.CorrectDrift { + if randBool() { + return nil + } + r := &fleet.CorrectDrift{ + Enabled: randBool(), + Force: randBool(), + KeepFailHistory: randBool(), + } + + return r +} + +func getRandomHelmAppWithTargets(name string, t []fleet.BundleTarget) fleet.HelmApp { + namespace = testenv.NewNamespaceName( + name, + rand.New(rand.NewSource(time.Now().UnixNano())), + ) + h := fleet.HelmApp{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + // add a few random values + Spec: fleet.HelmAppSpec{ + Labels: randStringMap(), + BundleSpec: fleet.BundleSpec{ + BundleDeploymentOptions: randBundleDeploymentOptions(), + }, + HelmSecretName: randString(), + InsecureSkipTLSverify: randBool(), + }, + } + + h.Spec.Targets = t + + return h +} + +// compareBundleAndHelmAppSpecs compares the part that it is expected to be equal +// between a Bundle's spec and a HelmApp's spec. 
+func compareBundleAndHelmAppSpecs(g Gomega, bundle fleet.BundleSpec, helmapp fleet.BundleSpec) { + g.Expect(bundle.BundleDeploymentOptions).To(Equal(helmapp.BundleDeploymentOptions)) + g.Expect(bundle.Paused).To(Equal(helmapp.Paused)) + g.Expect(bundle.RolloutStrategy).To(Equal(helmapp.RolloutStrategy)) + g.Expect(bundle.Resources).To(Equal(helmapp.Resources)) + g.Expect(bundle.Targets).To(Equal(helmapp.Targets)) + g.Expect(bundle.TargetRestrictions).To(Equal(helmapp.TargetRestrictions)) + g.Expect(bundle.DependsOn).To(Equal(helmapp.DependsOn)) +} + +// checkBundleIsAsExpected verifies that the bundle is a valid bundle created after +// the given HelmApp resource. +func checkBundleIsAsExpected(g Gomega, bundle fleet.Bundle, helmapp fleet.HelmApp, expectedTargets []v1alpha1.BundleTarget) { + g.Expect(bundle.Name).To(Equal(helmapp.Name)) + g.Expect(bundle.Namespace).To(Equal(helmapp.Namespace)) + // the bundle should have the same labels as the helmapp resource + // plus the fleet.HelmAppLabel containing the name of the helmapp + lbls := make(map[string]string) + for k, v := range helmapp.Spec.Labels { + lbls[k] = v + } + lbls = labels.Merge(lbls, map[string]string{ + fleet.HelmAppLabel: helmapp.Name, + }) + g.Expect(bundle.Labels).To(Equal(lbls)) + + g.Expect(bundle.Spec.Resources).To(BeNil()) + g.Expect(bundle.Spec.HelmAppOptions).ToNot(BeNil()) + g.Expect(bundle.Spec.HelmAppOptions.SecretName).To(Equal(helmapp.Spec.HelmSecretName)) + g.Expect(bundle.Spec.HelmAppOptions.InsecureSkipTLSverify).To(Equal(helmapp.Spec.InsecureSkipTLSverify)) + + g.Expect(bundle.Spec.Targets).To(Equal(expectedTargets)) + + // now that the bundle spec has been checked we assign the helmapp spec targets + // so it is easier to check the whole spec. 
(They should be identical except for the + // targets) + bundle.Spec.Targets = helmapp.Spec.Targets + + compareBundleAndHelmAppSpecs(g, bundle.Spec, helmapp.Spec.BundleSpec) + + // the bundle controller should add the finalizer + g.Expect(controllerutil.ContainsFinalizer(&bundle, finalize.BundleFinalizer)).To(BeTrue()) +} + +func updateHelmApp(helmapp fleet.HelmApp) error { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + var helmAppFromCluster fleet.HelmApp + err := k8sClient.Get(ctx, types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace}, &helmAppFromCluster) + if err != nil { + return err + } + helmAppFromCluster.Spec = helmapp.Spec + return k8sClient.Update(ctx, &helmAppFromCluster) + }) +} + +func getCondition(fllethelm *fleet.HelmApp, condType string) (genericcondition.GenericCondition, bool) { + for _, cond := range fllethelm.Status.Conditions { + if cond.Type == condType { + return cond, true + } + } + return genericcondition.GenericCondition{}, false +} + +func checkConditionContains(g Gomega, fllethelm *fleet.HelmApp, condType string, status corev1.ConditionStatus, message string) { + cond, found := getCondition(fllethelm, condType) + g.Expect(found).To(BeTrue()) + g.Expect(cond.Type).To(Equal(condType)) + g.Expect(cond.Status).To(Equal(status)) + g.Expect(cond.Message).To(ContainSubstring(message)) +} + +func newTLSServerWithAuth() *httptest.Server { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + username, password, ok := r.BasicAuth() + if ok { + usernameHash := sha256.Sum256([]byte(username)) + passwordHash := sha256.Sum256([]byte(password)) + expectedUsernameHash := sha256.Sum256([]byte(authUsername)) + expectedPasswordHash := sha256.Sum256([]byte(authPassword)) + + usernameMatch := (subtle.ConstantTimeCompare(usernameHash[:], expectedUsernameHash[:]) == 1) + passwordMatch := (subtle.ConstantTimeCompare(passwordHash[:], expectedPasswordHash[:]) == 1) + + if 
usernameMatch && passwordMatch { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, helmRepoIndex) + return + } + } + + w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + })) + return srv +} + +func getNewCustomTLSServer(handler http.Handler) (*httptest.Server, error) { + ts := httptest.NewUnstartedServer(handler) + serverCert, err := os.ReadFile("assets/server.crt") + if err != nil { + return nil, err + } + serverKey, err := os.ReadFile("assets/server.key") + if err != nil { + return nil, err + } + cert, err := tls.X509KeyPair(serverCert, serverKey) + if err != nil { + return nil, err + } + ts.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + ts.StartTLS() + return ts, nil +} + +var _ = Describe("HelmOps controller", func() { + When("a new HelmApp is created", func() { + var helmapp fleet.HelmApp + var targets []fleet.BundleTarget + var doAfterNamespaceCreated func() + JustBeforeEach(func() { + os.Setenv("EXPERIMENTAL_HELM_OPS", "true") + nsSpec := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + err := k8sClient.Create(ctx, nsSpec) + Expect(err).ToNot(HaveOccurred()) + Expect(k8sClient.Create(ctx, &helmapp)).ToNot(HaveOccurred()) + if doAfterNamespaceCreated != nil { + doAfterNamespaceCreated() + } + + DeferCleanup(func() { + Expect(k8sClient.Delete(ctx, nsSpec)).ToNot(HaveOccurred()) + _ = k8sClient.Delete(ctx, &helmapp) + }) + }) + When("targets is empty", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-empty", targets) + }) + + It("creates a bundle with the expected spec and default target", func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + 
} + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + }) + + It("adds the expected finalizer to the HelmApp resource", func() { + Eventually(func(g Gomega) { + fh := &fleet.HelmApp{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, fh) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(controllerutil.ContainsFinalizer(fh, finalize.HelmAppFinalizer)).To(BeTrue()) + }).Should(Succeed()) + }) + }) + + When("helmapp is updated", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-updated", targets) + }) + + It("updates the bundle with the expected content", func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + + // update the HelmApp spec + helmapp.Spec.Helm.Chart = "superchart" + + err := updateHelmApp(helmapp) + Expect(err).ToNot(HaveOccurred()) + + // Bundle should be updated + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + checkBundleIsAsExpected(g, *bundle, helmapp, t) + + // make this check explicit + g.Expect(bundle.Spec.Helm.Chart).To(Equal("superchart")) + }).Should(Succeed()) + }) + }) + + When("targets is not empty", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{ + { + Name: "one", + ClusterGroup: "oneGroup", + }, + { + Name: "two", + ClusterGroup: "twoGroup", + }, + } + helmapp = 
getRandomHelmAppWithTargets("test-not-empty", targets) + }) + + It("creates a bundle with the expected spec and the original targets", func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + checkBundleIsAsExpected(g, *bundle, helmapp, targets) + }).Should(Succeed()) + }) + }) + + When("helm chart is empty", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-empty", targets) + // no chart is defined + helmapp.Spec.Helm.Chart = "" + }) + + It("does not create a bundle", func() { + Consistently(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(BeNil()) + g.Expect(errors.IsNotFound(err)).To(BeTrue(), err) + }, 5*time.Second, time.Second).Should(Succeed()) + }) + }) + + When("helmapp is added and then deleted", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-add-delete", targets) + }) + + It("creates and deletes the bundle", func() { + // bundle should be initially created + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + + // delete the helmapp resource + err := k8sClient.Delete(ctx, &helmapp) + Expect(err).ShouldNot(HaveOccurred()) + + // eventually the bundle should be gone as well + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err 
:= k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(BeNil()) + g.Expect(errors.IsNotFound(err)).To(BeTrue(), err) + }).Should(Succeed()) + + // and the helmapp should be gone too (finalizer is deleted) + Eventually(func(g Gomega) { + fh := &fleet.HelmApp{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, fh) + g.Expect(err).ToNot(BeNil()) + g.Expect(errors.IsNotFound(err)).To(BeTrue(), err) + }).Should(Succeed()) + }) + }) + + Context("version is not specified", func() { + var version string + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-no-version", targets) + + // version is empty + helmapp.Spec.Helm.Version = version + // reset secret, no auth is required + helmapp.Spec.HelmSecretName = "" + + svr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, helmRepoIndex) + })) + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + }) + + bundleCreatedWithLatestVersion := func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + // the original helmapp has no version defined. 
+ // it should download version 0.2.0 as it is the + // latest in the test helm index.html + // set it here so the check passes and confirms + // the version obtained was 0.2.0 + helmapp.Spec.Helm.Version = "0.2.0" + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + } + + usesVersionSpecified := func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + // the original helmapp has no version defined. + // it should download version 0.2.0 as it is the + // latest in the test helm index.html + // set it here so the check passes and confirms + // the version obtained was 0.2.0 + helmapp.Spec.Helm.Version = "0.2.0" + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + + // update the HelmApp spec to use version 0.1.0 + helmapp.Spec.Helm.Version = "0.1.0" + + err := updateHelmApp(helmapp) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + // the original helmapp has no version defined. 
+ // it should download version 0.1.0 as it is + // what we specified + helmapp.Spec.Helm.Version = "0.1.0" + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + } + + When("version is empty", func() { + BeforeEach(func() { + version = "" + }) + It("creates a bundle with the latest version it got from the index", bundleCreatedWithLatestVersion) + It("uses the version specified if later the user sets it", usesVersionSpecified) + }) + + When("version is *", func() { + BeforeEach(func() { + version = "*" + }) + It("creates a bundle with the latest version it got from the index", bundleCreatedWithLatestVersion) + It("uses the version specified if later the user sets it", usesVersionSpecified) + }) + }) + + When("connecting to a https server", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-https", targets) + + // version is empty + helmapp.Spec.Helm.Version = "" + // reset secret, no auth is required + helmapp.Spec.HelmSecretName = "" + + svr := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, helmRepoIndex) + })) + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + helmapp.Spec.InsecureSkipTLSverify = false + }) + + It("does not create a bundle and returns and sets an error due to self signed certificate", func() { + Consistently(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(BeNil()) + g.Expect(errors.IsNotFound(err)).To(BeTrue(), err) + }, 5*time.Second, time.Second).Should(Succeed()) + + Eventually(func(g Gomega) { + fh := &fleet.HelmApp{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, fh) + 
g.Expect(err).ToNot(HaveOccurred()) + // check that the condition has the error + checkConditionContains( + g, + fh, + fleet.HelmAppAcceptedCondition, + corev1.ConditionFalse, + "tls: failed to verify certificate: x509: certificate signed by unknown authority", + ) + + }).Should(Succeed()) + }) + }) + + When("connecting to a https server with insecureTLSVerify set", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-insecure", targets) + + // version is empty + helmapp.Spec.Helm.Version = "" + // reset secret, no auth is required + helmapp.Spec.HelmSecretName = "" + + svr := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, helmRepoIndex) + })) + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + helmapp.Spec.InsecureSkipTLSverify = true + }) + + It("creates a bundle with the latest version it got from the index", func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + // the original helmapp has no version defined. 
+ // it should download version 0.2.0 as it is the + // latest in the test helm index.html + // set it here so the check passes and confirms + // the version obtained was 0.2.0 + helmapp.Spec.Helm.Version = "0.2.0" + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + }) + }) + + When("connecting to a https server with no credentials", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-nocreds", targets) + + // version is empty + helmapp.Spec.Helm.Version = "" + // reset secret, no auth is required + helmapp.Spec.HelmSecretName = "" + + svr := newTLSServerWithAuth() + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + helmapp.Spec.InsecureSkipTLSverify = true + }) + + It("does not create a bundle and returns and sets an error due to bad auth", func() { + Consistently(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(BeNil()) + g.Expect(errors.IsNotFound(err)).To(BeTrue(), err) + }, 5*time.Second, time.Second).Should(Succeed()) + + Eventually(func(g Gomega) { + fh := &fleet.HelmApp{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, fh) + g.Expect(err).ToNot(HaveOccurred()) + // check that the condition has the error + checkConditionContains( + g, + fh, + fleet.HelmAppAcceptedCondition, + corev1.ConditionFalse, + "error code: 401, response body: Unauthorized", + ) + + }).Should(Succeed()) + }) + }) + + When("connecting to a https server with wrong credentials in a secret", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-wrongcreds", targets) + + // version is empty + helmapp.Spec.Helm.Version = "" + // reset secret, no auth 
is required + helmapp.Spec.HelmSecretName = "" + + svr := newTLSServerWithAuth() + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + helmapp.Spec.InsecureSkipTLSverify = true + + // create secret with credentials + secretName := "supermegasecret" + doAfterNamespaceCreated = func() { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: helmapp.Namespace, + }, + Data: map[string][]byte{v1.BasicAuthUsernameKey: []byte(authUsername), v1.BasicAuthPasswordKey: []byte("badPassword")}, + Type: v1.SecretTypeBasicAuth, + } + err := k8sClient.Create(ctx, secret) + Expect(err).ToNot(HaveOccurred()) + } + + helmapp.Spec.HelmSecretName = secretName + }) + + It("does not create a bundle and returns and sets an error due to bad auth", func() { + Consistently(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(BeNil()) + g.Expect(errors.IsNotFound(err)).To(BeTrue(), err) + }, 5*time.Second, time.Second).Should(Succeed()) + + Eventually(func(g Gomega) { + fh := &fleet.HelmApp{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, fh) + g.Expect(err).ToNot(HaveOccurred()) + // check that the condition has the error + checkConditionContains( + g, + fh, + fleet.HelmAppAcceptedCondition, + corev1.ConditionFalse, + "error code: 401, response body: Unauthorized", + ) + + }).Should(Succeed()) + }) + }) + + When("connecting to a https server with correct credentials in a secret", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-creds", targets) + + // version is empty + helmapp.Spec.Helm.Version = "" + // reset secret, no auth is required + helmapp.Spec.HelmSecretName = "" + + svr := 
httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, helmRepoIndex) + })) + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + helmapp.Spec.InsecureSkipTLSverify = true + + // create secret with credentials + secretName := "supermegasecret" + doAfterNamespaceCreated = func() { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: helmapp.Namespace, + }, + Data: map[string][]byte{v1.BasicAuthUsernameKey: []byte(authUsername), v1.BasicAuthPasswordKey: []byte(authPassword)}, + Type: v1.SecretTypeBasicAuth, + } + err := k8sClient.Create(ctx, secret) + Expect(err).ToNot(HaveOccurred()) + } + + helmapp.Spec.HelmSecretName = secretName + }) + + It("creates a bundle with the latest version it got from the index", func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + // the original helmapp has no version defined. 
+ // it should download version 0.2.0 as it is the + // latest in the test helm index.html + // set it here so the check passes and confirms + // the version obtained was 0.2.0 + helmapp.Spec.Helm.Version = "0.2.0" + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + }) + }) + + When("connecting to a https server with correct credentials in a secret and caBundle", func() { + BeforeEach(func() { + targets = []fleet.BundleTarget{} + helmapp = getRandomHelmAppWithTargets("test-cabundle", targets) + + // version is empty + helmapp.Spec.Helm.Version = "" + // reset secret, no auth is required + helmapp.Spec.HelmSecretName = "" + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, helmRepoIndex) + }) + + svr, err := getNewCustomTLSServer(handler) + Expect(err).ToNot(HaveOccurred()) + DeferCleanup(func() { + svr.Close() + }) + + // set the url to the httptest server + helmapp.Spec.Helm.Repo = svr.URL + helmapp.Spec.Helm.Chart = "alpine" + + // create secret with credentials + secretName := "supermegasecret" + rootCert, err := os.ReadFile("assets/root.crt") + Expect(err).ToNot(HaveOccurred()) + doAfterNamespaceCreated = func() { + secret := &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: helmapp.Namespace, + }, + Data: map[string][]byte{ + v1.BasicAuthUsernameKey: []byte(authUsername), + v1.BasicAuthPasswordKey: []byte(authPassword), + // use the certificate from the httptest server + "cacerts": rootCert, + }, + Type: v1.SecretTypeBasicAuth, + } + err := k8sClient.Create(ctx, secret) + Expect(err).ToNot(HaveOccurred()) + } + + helmapp.Spec.HelmSecretName = secretName + helmapp.Spec.InsecureSkipTLSverify = false + }) + + It("creates a bundle with the latest version it got from the index", func() { + Eventually(func(g Gomega) { + bundle := &fleet.Bundle{} + ns := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + err := 
k8sClient.Get(ctx, ns, bundle) + g.Expect(err).ToNot(HaveOccurred()) + t := []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + // the original helmapp has no version defined. + // it should download version 0.2.0 as it is the + // latest in the test helm index.html + // set it here so the check passes and confirms + // the version obtained was 0.2.0 + helmapp.Spec.Helm.Version = "0.2.0" + checkBundleIsAsExpected(g, *bundle, helmapp, t) + }).Should(Succeed()) + }) + }) + }) +}) diff --git a/integrationtests/helmops/controller/status_test.go b/integrationtests/helmops/controller/status_test.go new file mode 100644 index 0000000000..55c50b9de3 --- /dev/null +++ b/integrationtests/helmops/controller/status_test.go @@ -0,0 +1,139 @@ +package controller + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/rancher/fleet/integrationtests/utils" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" +) + +var _ = Describe("HelmApp Status Fields", func() { + var ( + helmapp *fleet.HelmApp + bd *fleet.BundleDeployment + ) + + BeforeEach(func() { + var err error + namespace, err = utils.NewNamespaceName() + Expect(err).ToNot(HaveOccurred()) + + ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + Expect(k8sClient.Create(ctx, ns)).ToNot(HaveOccurred()) + + DeferCleanup(func() { + Expect(k8sClient.Delete(ctx, ns)).ToNot(HaveOccurred()) + }) + }) + + When("Bundle changes", func() { + BeforeEach(func() { + os.Setenv("EXPERIMENTAL_HELM_OPS", "true") + cluster, err := utils.CreateCluster(ctx, k8sClient, "cluster", namespace, nil, namespace) + Expect(err).NotTo(HaveOccurred()) + Expect(cluster).To(Not(BeNil())) + targets := []fleet.BundleTarget{ + { + BundleDeploymentOptions: fleet.BundleDeploymentOptions{ + TargetNamespace: "targetNs", + }, + Name: "cluster", + 
ClusterName: "cluster", + }, + } + bundle, err := utils.CreateBundle(ctx, k8sClient, "name", namespace, targets, targets) + Expect(err).NotTo(HaveOccurred()) + Expect(bundle).To(Not(BeNil())) + + helmapp = &fleet.HelmApp{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-helmapp", + Namespace: namespace, + }, + Spec: fleet.HelmAppSpec{ + BundleSpec: fleet.BundleSpec{ + BundleDeploymentOptions: fleet.BundleDeploymentOptions{ + Helm: &fleet.HelmOptions{ + Chart: "test", + }, + }, + }, + }, + } + err = k8sClient.Create(ctx, helmapp) + Expect(err).NotTo(HaveOccurred()) + + bd = &fleet.BundleDeployment{} + Eventually(func(g Gomega) { + nsName := types.NamespacedName{Namespace: namespace, Name: "name"} + g.Expect(k8sClient.Get(ctx, nsName, bd)).ToNot(HaveOccurred()) + }).Should(Succeed()) + }) + + It("updates the status fields", func() { + bundle := &fleet.Bundle{} + bundleName := types.NamespacedName{Namespace: namespace, Name: "name"} + helmAppName := types.NamespacedName{Namespace: namespace, Name: helmapp.Name} + By("Receiving a bundle update") + Eventually(func() error { + err := k8sClient.Get(ctx, bundleName, bundle) + Expect(err).ToNot(HaveOccurred()) + bundle.Labels[fleet.HelmAppLabel] = helmapp.Name + return k8sClient.Update(ctx, bundle) + }).ShouldNot(HaveOccurred()) + Expect(bundle.Status.Summary.Ready).ToNot(Equal(1)) + + err := k8sClient.Get(ctx, helmAppName, helmapp) + Expect(err).ToNot(HaveOccurred()) + Expect(helmapp.Status.Summary.Ready).To(Equal(0)) + Expect(helmapp.Status.ReadyClusters).To(Equal(0)) + + // This simulates what the bundle deployment reconciler would do. 
+ By("Updating the BundleDeployment status to ready") + bd := &fleet.BundleDeployment{} + Eventually(func() error { + err := k8sClient.Get(ctx, bundleName, bd) + if err != nil { + return err + } + bd.Status.Display.State = "Ready" + bd.Status.AppliedDeploymentID = bd.Spec.DeploymentID + bd.Status.Ready = true + bd.Status.NonModified = true + return k8sClient.Status().Update(ctx, bd) + }).ShouldNot(HaveOccurred()) + + // waiting for the bundle to update + Eventually(func() bool { + err := k8sClient.Get(ctx, bundleName, bundle) + Expect(err).NotTo(HaveOccurred()) + return bundle.Status.Summary.Ready == 1 + }).Should(BeTrue()) + + err = k8sClient.Get(ctx, helmAppName, helmapp) + Expect(err).ToNot(HaveOccurred()) + Expect(helmapp.Status.Summary.Ready).To(Equal(1)) + Expect(helmapp.Status.ReadyClusters).To(Equal(1)) + Expect(helmapp.Status.DesiredReadyClusters).To(Equal(1)) + + By("Deleting a bundle") + err = k8sClient.Delete(ctx, bundle) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + err := k8sClient.Get(ctx, helmAppName, helmapp) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(helmapp.Status.Summary.Ready).To(Equal(0)) + g.Expect(helmapp.Status.Summary.DesiredReady).To(Equal(0)) + g.Expect(helmapp.Status.Display.ReadyBundleDeployments).To(Equal("0/0")) + }).Should(Succeed()) + }) + }) +}) diff --git a/integrationtests/helmops/controller/suite_test.go b/integrationtests/helmops/controller/suite_test.go new file mode 100644 index 0000000000..9b74a1ebfa --- /dev/null +++ b/integrationtests/helmops/controller/suite_test.go @@ -0,0 +1,121 @@ +package controller + +import ( + "bytes" + "context" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.uber.org/mock/gomock" + + "github.com/rancher/fleet/internal/cmd/controller/helmops/reconciler" + ctrlreconciler "github.com/rancher/fleet/internal/cmd/controller/reconciler" + "github.com/rancher/fleet/internal/cmd/controller/target" + "github.com/rancher/fleet/internal/config" + "github.com/rancher/fleet/internal/manifest" + v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +const ( + timeout = 60 * time.Second +) + +var ( + cfg *rest.Config + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + k8sClient client.Client + logsBuffer bytes.Buffer + namespace string + k8sClientSet *kubernetes.Clientset +) + +func TestGitJobController(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Helm AppOps Controller Suite") +} + +var _ = BeforeSuite(func() { + SetDefaultEventuallyTimeout(timeout) + ctx, cancel = context.WithCancel(context.TODO()) + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "charts", "fleet-crd", "templates", "crds.yaml")}, + ErrorIfCRDPathMissing: true, + } + + var err error + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClientSet, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + ctlr := gomock.NewController(GinkgoT()) + + // 
redirect logs to a buffer that we can read in the tests + GinkgoWriter.TeeTo(&logsBuffer) + ctrl.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + config.Set(&config.Config{}) + + err = (&reconciler.HelmAppReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("helmops-controller"), + }).SetupWithManager(mgr) + Expect(err).ToNot(HaveOccurred()) + + err = (&reconciler.HelmAppStatusReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr) + Expect(err).ToNot(HaveOccurred()) + + store := manifest.NewStore(mgr.GetClient()) + builder := target.New(mgr.GetClient()) + err = (&ctrlreconciler.BundleReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Builder: builder, + Store: store, + Query: builder, + }).SetupWithManager(mgr) + Expect(err).ToNot(HaveOccurred(), "failed to set up manager") + + go func() { + defer GinkgoRecover() + defer ctlr.Finish() + err = mgr.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() +}) + +var _ = AfterSuite(func() { + cancel() + Expect(testEnv.Stop()).ToNot(HaveOccurred()) +}) diff --git a/integrationtests/mocks/fleet_controller_mock.go b/integrationtests/mocks/fleet_controller_mock.go index 4f52f3b0cc..8fc93fe980 100644 --- a/integrationtests/mocks/fleet_controller_mock.go +++ b/integrationtests/mocks/fleet_controller_mock.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1 (interfaces: Interface) +// Source: pkg/generated/controllers/fleet.cattle.io/v1alpha1/interface.go // Package mocks is a generated GoMock package. package mocks @@ -146,6 +146,20 @@ func (mr *FleetInterfaceMockRecorder) Content() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Content", reflect.TypeOf((*FleetInterface)(nil).Content)) } +// HelmApp mocks base method. 
+func (m *FleetInterface) HelmApp() v1alpha1.HelmAppController { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HelmApp") + ret0, _ := ret[0].(v1alpha1.HelmAppController) + return ret0 +} + +// HelmApp indicates an expected call of HelmApp. +func (mr *FleetInterfaceMockRecorder) HelmApp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HelmApp", reflect.TypeOf((*FleetInterface)(nil).HelmApp)) +} + // GitRepo mocks base method. func (m *FleetInterface) GitRepo() v1alpha1.GitRepoController { m.ctrl.T.Helper() diff --git a/integrationtests/utils/helpers.go b/integrationtests/utils/helpers.go index 9dea08348a..885669221b 100644 --- a/integrationtests/utils/helpers.go +++ b/integrationtests/utils/helpers.go @@ -34,6 +34,33 @@ func CreateBundle(ctx context.Context, k8sClient client.Client, name, namespace return &bundle, k8sClient.Create(ctx, &bundle) } +func CreateHelmBundle(ctx context.Context, k8sClient client.Client, name, namespace string, targets []v1alpha1.BundleTarget, targetRestrictions []v1alpha1.BundleTarget, helmOptions *v1alpha1.BundleHelmOptions) (*v1alpha1.Bundle, error) { + restrictions := []v1alpha1.BundleTargetRestriction{} + for _, r := range targetRestrictions { + restrictions = append(restrictions, v1alpha1.BundleTargetRestriction{ + Name: r.Name, + ClusterName: r.ClusterName, + ClusterSelector: r.ClusterSelector, + ClusterGroup: r.ClusterGroup, + ClusterGroupSelector: r.ClusterGroupSelector, + }) + } + bundle := v1alpha1.Bundle{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{"foo": "bar"}, + }, + Spec: v1alpha1.BundleSpec{ + Targets: targets, + TargetRestrictions: restrictions, + HelmAppOptions: helmOptions, + }, + } + + return &bundle, k8sClient.Create(ctx, &bundle) +} + func CreateCluster(ctx context.Context, k8sClient client.Client, name, controllerNs string, labels map[string]string, clusterNs string) (*v1alpha1.Cluster, error) { cluster := 
&v1alpha1.Cluster{ ObjectMeta: metav1.ObjectMeta{ diff --git a/internal/bundlereader/auth.go b/internal/bundlereader/auth.go new file mode 100644 index 0000000000..c858bb09f2 --- /dev/null +++ b/internal/bundlereader/auth.go @@ -0,0 +1,54 @@ +package bundlereader + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Auth struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + CABundle []byte `json:"caBundle,omitempty"` + SSHPrivateKey []byte `json:"sshPrivateKey,omitempty"` + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` +} + +func ReadHelmAuthFromSecret(ctx context.Context, c client.Client, req types.NamespacedName) (Auth, error) { + if req.Name == "" { + return Auth{}, nil + } + secret := &corev1.Secret{} + err := c.Get(ctx, req, secret) + if err != nil { + return Auth{}, err + } + + auth := Auth{} + username, okUsername := secret.Data[corev1.BasicAuthUsernameKey] + if okUsername { + auth.Username = string(username) + } + + password, okPasswd := secret.Data[corev1.BasicAuthPasswordKey] + if okPasswd { + auth.Password = string(password) + } + + // check that username and password are both set or none is set + if okUsername && !okPasswd { + return Auth{}, fmt.Errorf("%s is set in the secret, but %s isn't", corev1.BasicAuthUsernameKey, corev1.BasicAuthPasswordKey) + } else if !okUsername && okPasswd { + return Auth{}, fmt.Errorf("%s is set in the secret, but %s isn't", corev1.BasicAuthPasswordKey, corev1.BasicAuthUsernameKey) + } + + caBundle, ok := secret.Data["cacerts"] + if ok { + auth.CABundle = caBundle + } + + return auth, nil +} diff --git a/internal/bundlereader/auth_test.go b/internal/bundlereader/auth_test.go new file mode 100644 index 0000000000..3395813025 --- /dev/null +++ b/internal/bundlereader/auth_test.go @@ -0,0 +1,162 @@ +package bundlereader_test + +import ( + "context" + "fmt" + 
"testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/rancher/fleet/internal/bundlereader" + "github.com/rancher/fleet/internal/mocks" +) + +// nolint: funlen +func TestReadHelmAuthFromSecret(t *testing.T) { + cases := []struct { + name string + secretData map[string][]byte + getError string + expectedAuth bundlereader.Auth + expectedErrNotNil bool + expectedError string + }{ + { + name: "nothing is set", + secretData: map[string][]byte{}, + getError: "", + expectedAuth: bundlereader.Auth{ + // default values + }, + expectedErrNotNil: false, + expectedError: "", + }, + { + name: "username, password and caBundle are set", + secretData: map[string][]byte{ + corev1.BasicAuthUsernameKey: []byte("user"), + corev1.BasicAuthPasswordKey: []byte("passwd"), + "cacerts": []byte("test_cabundle"), + }, + getError: "", + expectedAuth: bundlereader.Auth{ + Username: "user", + Password: "passwd", + CABundle: []byte("test_cabundle"), + }, + expectedErrNotNil: false, + expectedError: "", + }, + { + name: "username, password are set, caBundle is not", + secretData: map[string][]byte{ + corev1.BasicAuthUsernameKey: []byte("user"), + corev1.BasicAuthPasswordKey: []byte("passwd"), + }, + getError: "", + expectedAuth: bundlereader.Auth{ + Username: "user", + Password: "passwd", + }, + expectedErrNotNil: false, + expectedError: "", + }, + { + name: "caBundle is set, username and password are not", + secretData: map[string][]byte{ + "cacerts": []byte("test_cabundle"), + }, + getError: "", + expectedAuth: bundlereader.Auth{ + CABundle: []byte("test_cabundle"), + }, + expectedErrNotNil: false, + expectedError: "", + }, + { + name: "username, caBundle are set, password is not", + secretData: map[string][]byte{ + corev1.BasicAuthUsernameKey: []byte("user"), + "cacerts": []byte("test_cabundle"), + }, + getError: "", + expectedAuth: bundlereader.Auth{}, + expectedErrNotNil: true, + 
expectedError: "username is set in the secret, but password isn't", + }, + { + name: "username is set, password and caBundle are not", + secretData: map[string][]byte{ + corev1.BasicAuthUsernameKey: []byte("user"), + }, + getError: "", + expectedAuth: bundlereader.Auth{}, + expectedErrNotNil: true, + expectedError: "username is set in the secret, but password isn't", + }, + { + name: "password, caBundle are set, username is not", + secretData: map[string][]byte{ + corev1.BasicAuthPasswordKey: []byte("passwd"), + "cacerts": []byte("test_cabundle"), + }, + getError: "", + expectedAuth: bundlereader.Auth{}, + expectedErrNotNil: true, + expectedError: "password is set in the secret, but username isn't", + }, + { + name: "password is set, username and caBundle are not", + secretData: map[string][]byte{ + corev1.BasicAuthPasswordKey: []byte("passwd"), + }, + getError: "", + expectedAuth: bundlereader.Auth{}, + expectedErrNotNil: true, + expectedError: "password is set in the secret, but username isn't", + }, + { + name: "username, password and caBundle are set, but we get an error getting the secret", + secretData: map[string][]byte{ + corev1.BasicAuthPasswordKey: []byte("passwd"), + }, + getError: "error getting secret", + expectedAuth: bundlereader.Auth{}, + expectedErrNotNil: true, + expectedError: "error getting secret", + }, + } + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockClient := mocks.NewMockClient(mockCtrl) + + assert := assert.New(t) + for _, c := range cases { + if c.getError != "" { + mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, _ types.NamespacedName, secret *corev1.Secret, _ ...interface{}) error { + return fmt.Errorf(c.getError) // nolint:govet + }, + ) + } else { + mockClient.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, _ types.NamespacedName, secret *corev1.Secret, _ ...interface{}) error { + secret.Data = c.secretData 
+ return nil + }, + ) + } + + nsName := types.NamespacedName{Name: "test", Namespace: "test"} + auth, err := bundlereader.ReadHelmAuthFromSecret(context.TODO(), mockClient, nsName) + assert.Equal(c.expectedErrNotNil, err != nil) + if err != nil && c.expectedErrNotNil { + assert.Equal(c.expectedError, err.Error()) + } + assert.Equal(auth, c.expectedAuth) + } +} diff --git a/internal/bundlereader/charturl.go b/internal/bundlereader/charturl.go index b01982a7f7..c34e438583 100644 --- a/internal/bundlereader/charturl.go +++ b/internal/bundlereader/charturl.go @@ -14,10 +14,36 @@ import ( "sigs.k8s.io/yaml" ) +// ChartVersion returns the version of the helm chart from a helm repo server, by +// inspecting the repo's index.yaml +func ChartVersion(location fleet.HelmOptions, auth Auth) (string, error) { + if hasOCIURL.MatchString(location.Chart) { + return location.Version, nil + } + + if location.Repo == "" { + return location.Version, nil + } + + if !strings.HasSuffix(location.Repo, "/") { + location.Repo = location.Repo + "/" + } + + chart, err := getHelmChartVersion(location, auth) + if err != nil { + return "", err + } + + if len(chart.URLs) == 0 { + return "", fmt.Errorf("no URLs found for chart %s %s at %s", chart.Name, chart.Version, location.Repo) + } + + return chart.Version, nil +} + // chartURL returns the URL to the helm chart from a helm repo server, by // inspecting the repo's index.yaml -func chartURL(location *fleet.HelmOptions, auth Auth) (string, error) { - // repos are not supported in case of OCI Charts +func chartURL(location fleet.HelmOptions, auth Auth) (string, error) { if hasOCIURL.MatchString(location.Chart) { return location.Chart, nil } @@ -30,11 +56,40 @@ func chartURL(location *fleet.HelmOptions, auth Auth) (string, error) { location.Repo = location.Repo + "/" } - request, err := http.NewRequest("GET", location.Repo+"index.yaml", nil) + chart, err := getHelmChartVersion(location, auth) + if err != nil { + return "", err + } + + if 
len(chart.URLs) == 0 { + return "", fmt.Errorf("no URLs found for chart %s %s at %s", chart.Name, chart.Version, location.Repo) + } + + chartURL, err := url.Parse(chart.URLs[0]) if err != nil { return "", err } + if chartURL.IsAbs() { + return chart.URLs[0], nil + } + + repoURL, err := url.Parse(location.Repo) + if err != nil { + return "", err + } + + return repoURL.ResolveReference(chartURL).String(), nil +} + +// getHelmChartVersion returns the ChartVersion struct with the information to the given location +// using the given authentication configuration +func getHelmChartVersion(location fleet.HelmOptions, auth Auth) (*repo.ChartVersion, error) { + request, err := http.NewRequest("GET", location.Repo+"index.yaml", nil) + if err != nil { + return nil, err + } + if auth.Username != "" && auth.Password != "" { request.SetBasicAuth(auth.Username, auth.Password) } @@ -47,56 +102,47 @@ func chartURL(location *fleet.HelmOptions, auth Auth) (string, error) { pool.AppendCertsFromPEM(auth.CABundle) transport := http.DefaultTransport.(*http.Transport).Clone() transport.TLSClientConfig = &tls.Config{ - RootCAs: pool, - MinVersion: tls.VersionTLS12, + RootCAs: pool, + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: auth.InsecureSkipVerify, // nolint:gosec } client.Transport = transport + } else { + if auth.InsecureSkipVerify { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: auth.InsecureSkipVerify, // nolint:gosec + } + client.Transport = transport + } } resp, err := client.Do(request) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() bytes, err := io.ReadAll(resp.Body) if err != nil { - return "", err + return nil, err } if resp.StatusCode != 200 { - return "", fmt.Errorf("failed to read helm repo from %s, error code: %v, response body: %s", location.Repo+"index.yaml", resp.StatusCode, bytes) + return nil, fmt.Errorf("failed to read helm repo from %s, error code: 
%v, response body: %s", location.Repo+"index.yaml", resp.StatusCode, bytes) } repo := &repo.IndexFile{} if err := yaml.Unmarshal(bytes, repo); err != nil { - return "", err + return nil, err } repo.SortEntries() chart, err := repo.Get(location.Chart, location.Version) if err != nil { - return "", err + return nil, err } - if len(chart.URLs) == 0 { - return "", fmt.Errorf("no URLs found for chart %s %s at %s", chart.Name, chart.Version, location.Repo) - } - - chartURL, err := url.Parse(chart.URLs[0]) - if err != nil { - return "", err - } - - if chartURL.IsAbs() { - return chart.URLs[0], nil - } - - repoURL, err := url.Parse(location.Repo) - if err != nil { - return "", err - } - - return repoURL.ResolveReference(chartURL).String(), nil + return chart, nil } diff --git a/internal/bundlereader/helm.go b/internal/bundlereader/helm.go new file mode 100644 index 0000000000..5a05975e89 --- /dev/null +++ b/internal/bundlereader/helm.go @@ -0,0 +1,45 @@ +package bundlereader + +import ( + "context" + "fmt" + "os" + + "github.com/rancher/fleet/internal/manifest" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetManifestFromHelmChart downloads the given helm chart and creates a manifest with its contents +func GetManifestFromHelmChart(ctx context.Context, c client.Client, bd *fleet.BundleDeployment) (*manifest.Manifest, error) { + helm := bd.Spec.Options.Helm + + if helm == nil { + return nil, fmt.Errorf("helm options not found") + } + temp, err := os.MkdirTemp("", "helmapp") + if err != nil { + return nil, err + } + defer os.RemoveAll(temp) + + nsName := types.NamespacedName{Namespace: bd.Namespace, Name: bd.Spec.HelmChartOptions.SecretName} + auth, err := ReadHelmAuthFromSecret(ctx, c, nsName) + if err != nil { + return nil, err + } + auth.InsecureSkipVerify = bd.Spec.HelmChartOptions.InsecureSkipTLSverify + + chartURL, err := chartURL(*helm, auth) + if err != nil 
{ + return nil, err + } + + resources, err := loadDirectory(ctx, false, false, checksum(helm), temp, chartURL, helm.Version, auth) + if err != nil { + return nil, err + } + + return manifest.New(resources), nil +} diff --git a/internal/bundlereader/helm_test.go b/internal/bundlereader/helm_test.go new file mode 100644 index 0000000000..fe3d6a2a67 --- /dev/null +++ b/internal/bundlereader/helm_test.go @@ -0,0 +1,448 @@ +package bundlereader_test + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "crypto/sha256" + "crypto/subtle" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/rancher/fleet/internal/bundlereader" + "github.com/rancher/fleet/internal/mocks" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" +) + +const ( + authUsername = "holadonpepito" + authPassword = "holadonjose" + chartName = "sleeper-chart" + helmRepoIndex = `apiVersion: v1 +entries: + sleeper: + - created: 2016-10-06T16:23:20.499814565-06:00 + description: Super sleeper chart + digest: 99c76e403d752c84ead610644d4b1c2f2b453a74b921f422b9dcb8a7c8b559cd + home: https://helm.sh/helm + name: alpine + sources: + - https://github.com/helm/helm + urls: + - https://##URL##/sleeper-chart-0.1.0.tgz + version: 0.1.0 +generated: 2016-10-06T16:23:20.499029981-06:00` + + chartYAML = `apiVersion: v2 +appVersion: 1.16.0 +description: A test chart +name: sleeper-chart +type: application +version: 0.1.0` + + values = `replicaCount: 1` + + deployment = `apiVersion: apps/v1 +kind: Deployment +metadata: + name: sleeper + labels: + fleet: testing +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app: sleeper + template: + metadata: + labels: + app: sleeper + spec: + containers: + - name: {{ .Chart.Name }} + command: + - sleep + - 7d + securityContext: + {{- 
toYaml .Values.securityContext | nindent 12 }} + image: "rancher/mirrored-library-busybox:1.34.1" + imagePullPolicy: IfNotPresent` +) + +func checksumPrefix(helm *fleet.HelmOptions) string { + if helm == nil { + return "none" + } + return fmt.Sprintf(".chart/%x", sha256.Sum256([]byte(helm.Chart + ":" + helm.Repo + ":" + helm.Version)[:])) +} + +func createChartDir(dir string) error { + // create the chart directories and copy the files + chartDir := filepath.Join(dir, chartName) + if err := os.Mkdir(chartDir, 0755); err != nil { + return err + } + + templatesDir := filepath.Join(chartDir, "templates") + if err := os.Mkdir(templatesDir, 0755); err != nil { + return err + } + if err := createFileFromString(chartDir, "Chart.yaml", chartYAML); err != nil { + return err + } + if err := createFileFromString(chartDir, "values.yaml", values); err != nil { + return err + } + if err := createFileFromString(templatesDir, "deployment.yaml", deployment); err != nil { + return err + } + + return nil +} + +func compressFolder(src string, buf io.Writer) error { + zr := gzip.NewWriter(buf) + defer zr.Close() + tw := tar.NewWriter(zr) + defer tw.Close() + + return filepath.Walk(src, func(file string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + header, err := tar.FileInfoHeader(fi, file) + if err != nil { + return err + } + + relPath, err := filepath.Rel(src, file) + if err != nil { + return err + } + header.Name = filepath.ToSlash(relPath) + + if err := tw.WriteHeader(header); err != nil { + return err + } + + if !fi.IsDir() { + data, err := os.Open(file) + if err != nil { + return err + } + defer data.Close() + + _, err = io.Copy(tw, data) + if err != nil { + return err + } + } + return nil + }) +} + +func createFileFromString(dir, fileName, data string) error { + path := filepath.Join(dir, fileName) + return os.WriteFile(path, []byte(data), 0644) +} + +func createHelmChartGZIP() (string, string, error) { + temp, err := os.MkdirTemp("", "charts_tmp") + 
if err != nil { + return "", "", err + } + defer os.RemoveAll(temp) + + if err := createChartDir(temp); err != nil { + return "", "", err + } + + var buf bytes.Buffer + if err := compressFolder(temp, &buf); err != nil { + return "", "", err + } + + finalDir, err := os.MkdirTemp("", "chart") + if err != nil { + return "", "", err + } + + gzipPath := filepath.Join(finalDir, "sleeper-chart-0.1.0.tgz") + err = os.WriteFile(gzipPath, buf.Bytes(), os.ModePerm) + if err != nil { + return finalDir, "", err + } + + return finalDir, gzipPath, nil +} + +func newTLSServer(index string, withAuth bool) *httptest.Server { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if withAuth { + username, password, ok := r.BasicAuth() + if ok { + usernameHash := sha256.Sum256([]byte(username)) + passwordHash := sha256.Sum256([]byte(password)) + expectedUsernameHash := sha256.Sum256([]byte(authUsername)) + expectedPasswordHash := sha256.Sum256([]byte(authPassword)) + + usernameMatch := (subtle.ConstantTimeCompare(usernameHash[:], expectedUsernameHash[:]) == 1) + passwordMatch := (subtle.ConstantTimeCompare(passwordHash[:], expectedPasswordHash[:]) == 1) + + if !usernameMatch || !passwordMatch { + w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + } + } + + w.WriteHeader(http.StatusOK) + if r.URL.Path == "/index.yaml" { + index = strings.Replace(index, "##URL##", r.Host, -1) + fmt.Fprint(w, index) + } else if r.URL.Path == "/sleeper-chart-0.1.0.tgz" { + dir, chartPath, err := createHelmChartGZIP() + if dir != "" { + defer os.RemoveAll(dir) + } + + if err != nil { + fmt.Printf("%v", err) + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, err.Error()) + return + } + f, err := os.Open(chartPath) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, err.Error()) + } + defer f.Close() + + _, err = io.Copy(w, 
f) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprint(w, err.Error()) + } + } + })) + return srv +} + +// nolint: funlen +func TestGetManifestFromHelmChart(t *testing.T) { + cases := []struct { + name string + bd fleet.BundleDeployment + clientCalls func(*mocks.MockClient) + requiresAuth bool + expectedNilManifest bool + expectedResources []fleet.BundleResource + expectedErrNotNil bool + expectedError string + }{ + { + name: "no helm options", + bd: fleet.BundleDeployment{ + Spec: fleet.BundleDeploymentSpec{ + Options: fleet.BundleDeploymentOptions{ + Helm: nil, + }, + }, + }, + clientCalls: func(c *mocks.MockClient) {}, + requiresAuth: false, + expectedNilManifest: true, + expectedResources: []fleet.BundleResource{}, + expectedErrNotNil: true, + expectedError: "helm options not found", + }, + { + name: "error reading secret", + bd: fleet.BundleDeployment{ + Spec: fleet.BundleDeploymentSpec{ + Options: fleet.BundleDeploymentOptions{ + Helm: &fleet.HelmOptions{}, + }, + HelmChartOptions: &fleet.BundleHelmOptions{ + SecretName: "invalid-secret", + }, + }, + }, + clientCalls: func(c *mocks.MockClient) { + c.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(fmt.Errorf("secret not found")) + }, + requiresAuth: false, + expectedNilManifest: true, + expectedResources: []fleet.BundleResource{}, + expectedErrNotNil: true, + expectedError: "secret not found", + }, + { + name: "authentication error", + bd: fleet.BundleDeployment{ + Spec: fleet.BundleDeploymentSpec{ + Options: fleet.BundleDeploymentOptions{ + Helm: &fleet.HelmOptions{ + Repo: "##URL##", // will be replaced by the mock server url + }, + }, + HelmChartOptions: &fleet.BundleHelmOptions{ + SecretName: "secretdoesnotexist", + InsecureSkipTLSverify: true, + }, + }, + }, + clientCalls: func(c *mocks.MockClient) { + c.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, _ types.NamespacedName, secret *corev1.Secret, _ ...interface{}) 
error { + secret.Data = make(map[string][]byte) + secret.Data[corev1.BasicAuthUsernameKey] = []byte(authUsername) + secret.Data[corev1.BasicAuthPasswordKey] = []byte("bad password") + return nil + }, + ) + }, + requiresAuth: true, + expectedNilManifest: true, + expectedResources: []fleet.BundleResource{}, + expectedErrNotNil: true, + expectedError: "failed to read helm repo from ##URL##/index.yaml, error code: 401, response body: Unauthorized\n", + }, + { + name: "tls error", + bd: fleet.BundleDeployment{ + Spec: fleet.BundleDeploymentSpec{ + Options: fleet.BundleDeploymentOptions{ + Helm: &fleet.HelmOptions{ + Repo: "##URL##", // will be replaced by the mock server url + }, + }, + HelmChartOptions: &fleet.BundleHelmOptions{ + SecretName: "secretdoesnotexist", + InsecureSkipTLSverify: false, + }, + }, + }, + clientCalls: func(c *mocks.MockClient) { + c.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + requiresAuth: false, + expectedNilManifest: true, + expectedResources: []fleet.BundleResource{}, + expectedErrNotNil: true, + expectedError: "Get \"##URL##/index.yaml\": tls: failed to verify certificate: x509: certificate signed by unknown authority", + }, + { + name: "load directory no version specified", + bd: fleet.BundleDeployment{ + Spec: fleet.BundleDeploymentSpec{ + Options: fleet.BundleDeploymentOptions{ + Helm: &fleet.HelmOptions{ + Repo: "##URL##", // will be replaced by the mock server url + Chart: "sleeper", + }, + }, + HelmChartOptions: &fleet.BundleHelmOptions{ + InsecureSkipTLSverify: true, + }, + }, + }, + clientCalls: func(c *mocks.MockClient) {}, + requiresAuth: false, + expectedNilManifest: false, + expectedResources: []fleet.BundleResource{ + { + Name: "sleeper-chart/templates/deployment.yaml", + Content: deployment, + }, + { + Name: "sleeper-chart/values.yaml", + Content: values, + }, + { + Name: "sleeper-chart/Chart.yaml", + Content: chartYAML, + }, + }, + expectedErrNotNil: false, + expectedError: "", + }, + } + + 
mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockClient := mocks.NewMockClient(mockCtrl) + + assert := assert.New(t) + for _, c := range cases { + // set expected calls to client mock + c.clientCalls(mockClient) + + // start mock server for test + srv := newTLSServer(helmRepoIndex, c.requiresAuth) + defer srv.Close() + + resourcePrefix := "" + if c.bd.Spec.Options.Helm != nil { + c.bd.Spec.Options.Helm.Repo = strings.Replace(c.bd.Spec.Options.Helm.Repo, "##URL##", srv.URL, -1) + // resource names have a prefix that depends on helm options + resourcePrefix = checksumPrefix(c.bd.Spec.Options.Helm) + } + // change the url in the error in case it is present + c.expectedError = strings.Replace(c.expectedError, "##URL##", srv.URL, -1) + + manifest, err := bundlereader.GetManifestFromHelmChart(context.TODO(), mockClient, &c.bd) + + assert.Equal(c.expectedNilManifest, manifest == nil) + assert.Equal(c.expectedErrNotNil, err != nil) + if err != nil && c.expectedErrNotNil { + assert.Equal(c.expectedError, err.Error()) + } + if manifest != nil { + // check that all expected resources are found + for _, expectedRes := range c.expectedResources { + // find the resource in the expected ones + found := false + for _, r := range manifest.Resources { + if fmt.Sprintf("%s/%s", resourcePrefix, expectedRes.Name) == r.Name { + found = true + assert.Equal(expectedRes.Content, r.Content) + } + } + if !found { + t.Errorf("expected resource %s was not found", expectedRes.Name) + } + } + + // check that all of the returned resources are also expected + for _, r := range manifest.Resources { + // find the resource in the expected ones + found := false + for _, expectedRes := range c.expectedResources { + if fmt.Sprintf("%s/%s", resourcePrefix, expectedRes.Name) == r.Name { + found = true + assert.Equal(expectedRes.Content, r.Content) + } + } + if !found { + t.Errorf("returned resource %s was not expected", r.Name) + } + } + } + } +} diff --git 
a/internal/bundlereader/loaddirectory.go b/internal/bundlereader/loaddirectory.go index f63e7f2728..a5c8138edb 100644 --- a/internal/bundlereader/loaddirectory.go +++ b/internal/bundlereader/loaddirectory.go @@ -20,12 +20,28 @@ import ( "github.com/rancher/fleet/internal/content" "github.com/rancher/fleet/internal/helmupdater" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" - "helm.sh/helm/v3/pkg/cli" "helm.sh/helm/v3/pkg/downloader" helmgetter "helm.sh/helm/v3/pkg/getter" "helm.sh/helm/v3/pkg/registry" ) +var ( + registryClient *registry.Client + + fleetOciProvider = helmgetter.Provider{ + Schemes: []string{registry.OCIScheme}, + New: NewFleetOCIProvider, + } +) + +func NewFleetOCIProvider(options ...helmgetter.Option) (helmgetter.Getter, error) { + if registryClient == nil { + return nil, fmt.Errorf("oci registry client is nil") + } + + return helmgetter.NewOCIGetter(helmgetter.WithRegistryClient(registryClient)) +} + // ignoreTree represents a tree of ignored paths (read from .fleetignore files), each node being a directory. // It provides a means for ignored paths to be propagated down the tree, but not between subdirectories of a same // directory. 
@@ -322,7 +338,6 @@ func GetContent(ctx context.Context, base, source, version string, auth Auth, di // downloadOCIChart uses Helm to download charts from OCI based registries func downloadOCIChart(name, version, path string, auth Auth) (string, error) { - var registryClient *registry.Client var requiresLogin bool = auth.Username != "" && auth.Password != "" url, err := url.Parse(name) @@ -330,7 +345,17 @@ func downloadOCIChart(name, version, path string, auth Auth) (string, error) { return "", err } - registryClient, err = registry.NewClient() + temp, err := os.MkdirTemp("", "creds") + if err != nil { + return "", err + } + defer os.RemoveAll(temp) + + tmpGetter := newHttpGetter(auth) + registryClient, err = registry.NewClient( + registry.ClientOptCredentialsFile(filepath.Join(temp, "creds.json")), + registry.ClientOptHTTPClient(tmpGetter.Client), + ) if err != nil { return "", err } @@ -346,7 +371,7 @@ func downloadOCIChart(name, version, path string, auth Auth) (string, error) { err = registryClient.Login( addr, - registry.LoginOptInsecure(false), + registry.LoginOptInsecure(auth.InsecureSkipVerify), registry.LoginOptBasicAuth(auth.Username, auth.Password), ) if err != nil { @@ -354,10 +379,17 @@ func downloadOCIChart(name, version, path string, auth Auth) (string, error) { } } + getterOptions := []helmgetter.Option{} + if auth.Username != "" && auth.Password != "" { + getterOptions = append(getterOptions, helmgetter.WithBasicAuth(auth.Username, auth.Password)) + } + getterOptions = append(getterOptions, helmgetter.WithInsecureSkipVerifyTLS(true)) + c := downloader.ChartDownloader{ Verify: downloader.VerifyNever, - Getters: helmgetter.All(&cli.EnvSettings{}), + Getters: helmgetter.Providers{fleetOciProvider}, RegistryClient: registryClient, + Options: getterOptions, } saved, _, err := c.DownloadTo(name, version, path) @@ -394,11 +426,19 @@ func newHttpGetter(auth Auth) *getter.HttpGetter { pool.AppendCertsFromPEM(auth.CABundle) transport := 
http.DefaultTransport.(*http.Transport).Clone() transport.TLSClientConfig = &tls.Config{ - RootCAs: pool, - MinVersion: tls.VersionTLS12, + RootCAs: pool, + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: auth.InsecureSkipVerify, // nolint:gosec + } + httpGetter.Client.Transport = transport + } else if auth.InsecureSkipVerify { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: auth.InsecureSkipVerify, // nolint:gosec } httpGetter.Client.Transport = transport } + return httpGetter } diff --git a/internal/bundlereader/resources.go b/internal/bundlereader/resources.go index fb7ab732d6..55d4b3185a 100644 --- a/internal/bundlereader/resources.go +++ b/internal/bundlereader/resources.go @@ -22,13 +22,6 @@ import ( var hasOCIURL = regexp.MustCompile(`^oci:\/\/`) -type Auth struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - CABundle []byte `json:"caBundle,omitempty"` - SSHPrivateKey []byte `json:"sshPrivateKey,omitempty"` -} - // readResources reads and downloads all resources from the bundle func readResources(ctx context.Context, spec *fleet.BundleSpec, compress bool, base string, auth Auth, helmRepoURLRegex string) ([]fleet.BundleResource, error) { directories, err := addDirectory(base, ".", ".") @@ -167,7 +160,7 @@ func addRemoteCharts(directories []directory, base string, charts []*fleet.HelmO auth = Auth{} } - chartURL, err := chartURL(chart, auth) + chartURL, err := chartURL(*chart, auth) if err != nil { return nil, err } diff --git a/internal/cmd/agent/deployer/deployer.go b/internal/cmd/agent/deployer/deployer.go index 58142ae675..34200e8e63 100644 --- a/internal/cmd/agent/deployer/deployer.go +++ b/internal/cmd/agent/deployer/deployer.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" + "github.com/rancher/fleet/internal/bundlereader" "github.com/rancher/fleet/internal/helmdeployer" "github.com/rancher/fleet/internal/manifest" 
"github.com/rancher/fleet/internal/ociwrapper" @@ -64,6 +65,7 @@ func (d *Deployer) DeployBundle(ctx context.Context, bd *fleet.BundleDeployment) } releaseID, err := d.helmdeploy(ctx, logger, bd) + if err != nil { // When an error from DeployBundle is returned it causes DeployBundle // to requeue and keep trying to deploy on a loop. If there is something @@ -108,8 +110,8 @@ func (d *Deployer) helmdeploy(ctx context.Context, logger logr.Logger, bd *fleet } manifestID, _ := kv.Split(bd.Spec.DeploymentID, ":") var ( - manifest *manifest.Manifest - err error + m *manifest.Manifest + err error ) if bd.Spec.OCIContents { // First we need to access the secret where the OCI registry reference and credentials are located @@ -140,19 +142,24 @@ func (d *Deployer) helmdeploy(ctx context.Context, logger logr.Logger, bd *fleet InsecureSkipTLS: insecure, } oci := ociwrapper.NewOCIWrapper() - manifest, err = oci.PullManifest(ctx, ociOpts, manifestID) + m, err = oci.PullManifest(ctx, ociOpts, manifestID) + if err != nil { + return "", err + } + } else if bd.Spec.HelmChartOptions != nil { + m, err = bundlereader.GetManifestFromHelmChart(ctx, d.client, bd) if err != nil { return "", err } } else { - manifest, err = d.lookup.Get(ctx, d.upstreamClient, manifestID) + m, err = d.lookup.Get(ctx, d.upstreamClient, manifestID) if err != nil { return "", err } } - manifest.Commit = bd.Labels[fleet.CommitLabel] - release, err := d.helm.Deploy(ctx, bd.Name, manifest, bd.Spec.Options) + m.Commit = bd.Labels[fleet.CommitLabel] + release, err := d.helm.Deploy(ctx, bd.Name, m, bd.Spec.Options) if err != nil { return "", err } diff --git a/internal/cmd/controller/agentmanagement/agent/manifest.go b/internal/cmd/controller/agentmanagement/agent/manifest.go index 65e991630b..ac71e7f182 100644 --- a/internal/cmd/controller/agentmanagement/agent/manifest.go +++ b/internal/cmd/controller/agentmanagement/agent/manifest.go @@ -220,6 +220,10 @@ func agentApp(namespace string, agentScope string, opts 
ManifestOptions) *appsv1 Name: "kube", MountPath: "/.kube", }, + { + Name: "tmp", + MountPath: "/tmp", + }, }, }, { @@ -250,6 +254,12 @@ func agentApp(namespace string, agentScope string, opts ManifestOptions) *appsv1 EmptyDir: &corev1.EmptyDirVolumeSource{}, }, }, + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, }, NodeSelector: map[string]string{"kubernetes.io/os": "linux"}, Affinity: &corev1.Affinity{ diff --git a/internal/cmd/controller/finalize/finalize.go b/internal/cmd/controller/finalize/finalize.go index 1bf2b8ed47..b38491e901 100644 --- a/internal/cmd/controller/finalize/finalize.go +++ b/internal/cmd/controller/finalize/finalize.go @@ -17,17 +17,18 @@ import ( ) const ( + HelmAppFinalizer = "fleet.cattle.io/helmapp-finalizer" GitRepoFinalizer = "fleet.cattle.io/gitrepo-finalizer" BundleFinalizer = "fleet.cattle.io/bundle-finalizer" BundleDeploymentFinalizer = "fleet.cattle.io/bundle-deployment-finalizer" ) -// PurgeBundles deletes all bundles related to the given GitRepo namespaced name +// PurgeBundles deletes all bundles related to the given resource namespaced name // It deletes resources in cascade. Deleting Bundles, its BundleDeployments, and // the related namespace if Bundle.Spec.DeleteNamespace is set to true. 
-func PurgeBundles(ctx context.Context, c client.Client, gitrepo types.NamespacedName) error { +func PurgeBundles(ctx context.Context, c client.Client, gitrepo types.NamespacedName, resourceLabel string) error { bundles := &v1alpha1.BundleList{} - err := c.List(ctx, bundles, client.MatchingLabels{v1alpha1.RepoLabel: gitrepo.Name}, client.InNamespace(gitrepo.Namespace)) + err := c.List(ctx, bundles, client.MatchingLabels{resourceLabel: gitrepo.Name}, client.InNamespace(gitrepo.Namespace)) if err != nil { return err } diff --git a/internal/cmd/controller/gitops/operator.go b/internal/cmd/controller/gitops/operator.go index be5c8e4d31..2575bcac77 100644 --- a/internal/cmd/controller/gitops/operator.go +++ b/internal/cmd/controller/gitops/operator.go @@ -171,6 +171,7 @@ func (g *GitOperator) Run(cmd *cobra.Command, args []string) error { if err = statusReconciler.SetupWithManager(mgr); err != nil { return err } + return mgr.Start(ctx) }) diff --git a/internal/cmd/controller/gitops/reconciler/gitjob_controller.go b/internal/cmd/controller/gitops/reconciler/gitjob_controller.go index 8ddd5130b9..32fc27a544 100644 --- a/internal/cmd/controller/gitops/reconciler/gitjob_controller.go +++ b/internal/cmd/controller/gitops/reconciler/gitjob_controller.go @@ -281,7 +281,7 @@ func (r *GitJobReconciler) cleanupGitRepo(ctx context.Context, logger logr.Logge metrics.GitRepoCollector.Delete(gitrepo.Name, gitrepo.Namespace) nsName := types.NamespacedName{Name: gitrepo.Name, Namespace: gitrepo.Namespace} - if err := finalize.PurgeBundles(ctx, r.Client, nsName); err != nil { + if err := finalize.PurgeBundles(ctx, r.Client, nsName, v1alpha1.RepoLabel); err != nil { return err } diff --git a/internal/cmd/controller/gitops/reconciler/status_controller.go b/internal/cmd/controller/gitops/reconciler/status_controller.go index cf4d4c3293..7aa990fa67 100644 --- a/internal/cmd/controller/gitops/reconciler/status_controller.go +++ b/internal/cmd/controller/gitops/reconciler/status_controller.go 
@@ -3,9 +3,9 @@ package reconciler import ( "context" "fmt" - "reflect" "sort" + "github.com/rancher/fleet/internal/cmd/controller/status" "github.com/rancher/fleet/internal/cmd/controller/summary" "github.com/rancher/fleet/internal/resourcestatus" v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" @@ -19,10 +19,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" ) type StatusReconciler struct { @@ -51,7 +49,7 @@ func (r *StatusReconciler) SetupWithManager(mgr ctrl.Manager) error { return []ctrl.Request{} }), - builder.WithPredicates(bundleStatusChangedPredicate()), + builder.WithPredicates(status.BundleStatusChangedPredicate()), ). WithEventFilter(sharding.FilterByShardID(r.ShardID)). WithOptions(controller.Options{MaxConcurrentReconciles: r.Workers}). @@ -120,42 +118,18 @@ func (r *StatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, nil } -// bundleStatusChangedPredicate returns true if the bundle -// status has changed, or the bundle was created -func bundleStatusChangedPredicate() predicate.Funcs { - return predicate.Funcs{ - CreateFunc: func(e event.CreateEvent) bool { - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - n, isBundle := e.ObjectNew.(*v1alpha1.Bundle) - if !isBundle { - return false - } - o := e.ObjectOld.(*v1alpha1.Bundle) - if n == nil || o == nil { - return false - } - return !reflect.DeepEqual(n.Status, o.Status) - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return true - }, - } -} - func setStatus(list *v1alpha1.BundleDeploymentList, gitrepo *v1alpha1.GitRepo) error { // sort for resourceKey? 
sort.Slice(list.Items, func(i, j int) bool { return list.Items[i].UID < list.Items[j].UID }) - err := setFields(list, gitrepo) + err := status.SetFields(list, &gitrepo.Status.StatusBase) if err != nil { return err } - resourcestatus.SetGitRepoResources(list, gitrepo) + resourcestatus.SetResources(list, &gitrepo.Status.StatusBase) summary.SetReadyConditions(&gitrepo.Status, "Bundle", gitrepo.Status.Summary) @@ -165,69 +139,3 @@ func setStatus(list *v1alpha1.BundleDeploymentList, gitrepo *v1alpha1.GitRepo) e return nil } - -// setFields sets bundledeployment related status fields: -// Summary, ReadyClusters, DesiredReadyClusters, Display.State, Display.Message, Display.Error -func setFields(list *v1alpha1.BundleDeploymentList, gitrepo *v1alpha1.GitRepo) error { - var ( - maxState v1alpha1.BundleState - message string - count = map[client.ObjectKey]int{} - readyCount = map[client.ObjectKey]int{} - ) - - gitrepo.Status.Summary = v1alpha1.BundleSummary{} - - for _, bd := range list.Items { - state := summary.GetDeploymentState(&bd) - summary.IncrementState(&gitrepo.Status.Summary, bd.Name, state, summary.MessageFromDeployment(&bd), bd.Status.ModifiedStatus, bd.Status.NonReadyStatus) - gitrepo.Status.Summary.DesiredReady++ - if v1alpha1.StateRank[state] > v1alpha1.StateRank[maxState] { - maxState = state - message = summary.MessageFromDeployment(&bd) - } - - // gather status per cluster - // try to avoid old bundle deployments, which might be missing the labels - if bd.Labels == nil { - // this should not happen - continue - } - - name := bd.Labels[v1alpha1.ClusterLabel] - namespace := bd.Labels[v1alpha1.ClusterNamespaceLabel] - if name == "" || namespace == "" { - // this should not happen - continue - } - - key := client.ObjectKey{Name: name, Namespace: namespace} - count[key]++ - if state == v1alpha1.Ready { - readyCount[key]++ - } - } - - // unique number of clusters from bundledeployments - gitrepo.Status.DesiredReadyClusters = len(count) - - // number of clusters 
where all deployments are ready - readyClusters := 0 - for key, n := range readyCount { - if count[key] == n { - readyClusters++ - } - } - gitrepo.Status.ReadyClusters = readyClusters - - if maxState == v1alpha1.Ready { - maxState = "" - message = "" - } - - gitrepo.Status.Display.State = string(maxState) - gitrepo.Status.Display.Message = message - gitrepo.Status.Display.Error = len(message) > 0 - - return nil -} diff --git a/internal/cmd/controller/helmops/operator.go b/internal/cmd/controller/helmops/operator.go new file mode 100644 index 0000000000..40c915bae4 --- /dev/null +++ b/internal/cmd/controller/helmops/operator.go @@ -0,0 +1,178 @@ +package helmops + +import ( + "fmt" + "os" + "strconv" + + "github.com/reugn/go-quartz/quartz" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + clog "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + command "github.com/rancher/fleet/internal/cmd" + "github.com/rancher/fleet/internal/cmd/controller/helmops/reconciler" + fcreconciler "github.com/rancher/fleet/internal/cmd/controller/reconciler" + "github.com/rancher/fleet/internal/metrics" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/fleet/pkg/version" +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") + zopts *zap.Options +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(fleet.AddToScheme(scheme)) +} + +type HelmOperator struct { + command.DebugConfig + Kubeconfig string `usage:"Kubeconfig file"` + Namespace string `usage:"namespace to watch" default:"cattle-fleet-system" env:"NAMESPACE"` + MetricsAddr string `name:"metrics-bind-address" default:":8081" 
usage:"The address the metric endpoint binds to."` + DisableMetrics bool `name:"disable-metrics" usage:"Disable the metrics server."` + EnableLeaderElection bool `name:"leader-elect" default:"true" usage:"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager."` + ShardID string `usage:"only manage resources labeled with a specific shard ID" name:"shard-id"` +} + +func App(zo *zap.Options) *cobra.Command { + zopts = zo + return command.Command(&HelmOperator{}, cobra.Command{ + Version: version.FriendlyVersion(), + Use: "helmops", + }) +} + +// HelpFunc hides the global flag from the help output +func (c *HelmOperator) HelpFunc(cmd *cobra.Command, strings []string) { + cmd.Parent().HelpFunc()(cmd, strings) +} + +func (g *HelmOperator) PersistentPre(_ *cobra.Command, _ []string) error { + if err := g.SetupDebug(); err != nil { + return fmt.Errorf("failed to setup debug logging: %w", err) + } + zopts = g.OverrideZapOpts(zopts) + + return nil +} + +func (g *HelmOperator) Run(cmd *cobra.Command, args []string) error { + ctrl.SetLogger(zap.New(zap.UseFlagOptions(zopts))) + ctx := clog.IntoContext(cmd.Context(), ctrl.Log.WithName("helmapp-reconciler")) + + namespace := g.Namespace + + leaderOpts, err := command.NewLeaderElectionOptions() + if err != nil { + return err + } + + var shardIDSuffix string + if g.ShardID != "" { + shardIDSuffix = fmt.Sprintf("-%s", g.ShardID) + } + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + Metrics: g.setupMetrics(), + LeaderElection: g.EnableLeaderElection, + LeaderElectionID: fmt.Sprintf("fleet-helmops-leader-election-shard%s", shardIDSuffix), + LeaderElectionNamespace: namespace, + LeaseDuration: leaderOpts.LeaseDuration, + RenewDeadline: leaderOpts.RenewDeadline, + RetryPeriod: leaderOpts.RetryPeriod, + }) + + if err != nil { + return err + } + + sched := quartz.NewStdScheduler() + + var workers int + if d := 
os.Getenv("HELMOPS_RECONCILER_WORKERS"); d != "" { + w, err := strconv.Atoi(d) + if err != nil { + setupLog.Error(err, "failed to parse HELMOPS_RECONCILER_WORKERS", "value", d) + } + workers = w + } + + helmAppReconciler := &reconciler.HelmAppReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Scheduler: sched, + Workers: workers, + ShardID: g.ShardID, + Recorder: mgr.GetEventRecorderFor(fmt.Sprintf("fleet-helmops%s", shardIDSuffix)), + } + + helmAppStatusReconciler := &reconciler.HelmAppStatusReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ShardID: g.ShardID, + Workers: workers, + } + + configReconciler := &fcreconciler.ConfigReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + SystemNamespace: namespace, + ShardID: g.ShardID, + } + + if err := fcreconciler.Load(ctx, mgr.GetAPIReader(), namespace); err != nil { + setupLog.Error(err, "failed to load config") + return err + } + + group, ctx := errgroup.WithContext(ctx) + group.Go(func() error { + setupLog.Info("starting config controller") + if err = configReconciler.SetupWithManager(mgr); err != nil { + return err + } + + setupLog.Info("starting helmops controller") + if err = helmAppReconciler.SetupWithManager(mgr); err != nil { + return err + } + + setupLog.Info("starting helmops status controller") + if err = helmAppStatusReconciler.SetupWithManager(mgr); err != nil { + return err + } + + return mgr.Start(ctx) + }) + + return group.Wait() +} + +func (g *HelmOperator) setupMetrics() metricsserver.Options { + if g.DisableMetrics { + return metricsserver.Options{BindAddress: "0"} + } + + metricsAddr := g.MetricsAddr + if d := os.Getenv("HELMOPS_METRICS_BIND_ADDRESS"); d != "" { + metricsAddr = d + } + + metricServerOpts := metricsserver.Options{BindAddress: metricsAddr} + metrics.RegisterHelmOpsMetrics() // enable helmops related metrics + + return metricServerOpts +} diff --git a/internal/cmd/controller/helmops/reconciler/helmapp_controller.go 
b/internal/cmd/controller/helmops/reconciler/helmapp_controller.go new file mode 100644 index 0000000000..39cb86acda --- /dev/null +++ b/internal/cmd/controller/helmops/reconciler/helmapp_controller.go @@ -0,0 +1,405 @@ +package reconciler + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + errutil "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/genericcondition" + "github.com/reugn/go-quartz/quartz" + + "github.com/rancher/fleet/internal/bundlereader" + fleetutil "github.com/rancher/fleet/internal/cmd/controller/errorutil" + "github.com/rancher/fleet/internal/cmd/controller/finalize" + "github.com/rancher/fleet/internal/metrics" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/fleet/pkg/sharding" +) + +// HelmAppReconciler reconciles a HelmApp resource to create and apply bundles for helm charts +type HelmAppReconciler struct { + client.Client + Scheme *runtime.Scheme + Scheduler quartz.Scheduler + Workers int + ShardID string + Recorder record.EventRecorder +} + +func (r *HelmAppReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). 
+ For(&fleet.HelmApp{}, + builder.WithPredicates( + predicate.Or( + predicate.GenerationChangedPredicate{}, + predicate.AnnotationChangedPredicate{}, + predicate.LabelChangedPredicate{}, + ), + ), + ). + WithEventFilter(sharding.FilterByShardID(r.ShardID)). + WithOptions(controller.Options{MaxConcurrentReconciles: r.Workers}). + Complete(r) +} + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// The Reconcile function compares the state specified by +// the HelmApp object against the actual cluster state, and then +// performs operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile +func (r *HelmAppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + if !experimentalHelmOpsEnabled() { + return ctrl.Result{}, fmt.Errorf("HelmApp resource was found but env variable EXPERIMENTAL_HELM_OPS is not set to true") + } + logger := log.FromContext(ctx).WithName("HelmApp") + helmapp := &fleet.HelmApp{} + + if err := r.Get(ctx, req.NamespacedName, helmapp); err != nil && !errors.IsNotFound(err) { + return ctrl.Result{}, err + } else if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + + // Finalizer handling + purgeBundlesFn := func() error { + nsName := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + if err := finalize.PurgeBundles(ctx, r.Client, nsName, fleet.HelmAppLabel); err != nil { + return err + } + return nil + } + + if !helmapp.GetDeletionTimestamp().IsZero() { + + metrics.HelmCollector.Delete(helmapp.Name, helmapp.Namespace) + + if err := purgeBundlesFn(); err != nil { + return ctrl.Result{}, err + } + if controllerutil.ContainsFinalizer(helmapp, finalize.HelmAppFinalizer) { + if err := deleteFinalizer(ctx, r.Client, helmapp, 
finalize.HelmAppFinalizer); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil + } + + if !controllerutil.ContainsFinalizer(helmapp, finalize.HelmAppFinalizer) { + if err := addFinalizer(ctx, r.Client, helmapp, finalize.HelmAppFinalizer); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{Requeue: true}, nil + } + + // Reconciling + logger = logger.WithValues("generation", helmapp.Generation, "chart", helmapp.Spec.Helm.Chart) + ctx = log.IntoContext(ctx, logger) + + logger.V(1).Info("Reconciling HelmApp") + + if helmapp.Spec.Helm.Chart == "" { + return ctrl.Result{}, nil + } + + bundle, err := r.createUpdateBundle(ctx, helmapp) + if err != nil { + return ctrl.Result{}, updateErrorStatusHelm(ctx, r.Client, req.NamespacedName, helmapp.Status, err) + } + + helmapp.Status.Version = bundle.Spec.Helm.Version + + err = updateStatus(ctx, r.Client, req.NamespacedName, helmapp.Status) + if err != nil { + logger.Error(err, "Reconcile failed final update to helm app status", "status", helmapp.Status) + + return ctrl.Result{Requeue: true}, err + } + + return ctrl.Result{}, err +} + +func (r *HelmAppReconciler) createUpdateBundle(ctx context.Context, helmapp *fleet.HelmApp) (*fleet.Bundle, error) { + b := &fleet.Bundle{} + nsName := types.NamespacedName{ + Name: helmapp.Name, + Namespace: helmapp.Namespace, + } + + err := r.Get(ctx, nsName, b) + if err != nil && !errors.IsNotFound(err) { + return nil, err + } + + // calculate the new representation of the helmapp resource + bundle := r.calculateBundle(helmapp) + + if err := r.handleVersion(ctx, b, bundle, helmapp); err != nil { + return nil, err + } + + updated := bundle.DeepCopy() + _, err = controllerutil.CreateOrUpdate(ctx, r.Client, bundle, func() error { + bundle.Spec = updated.Spec + bundle.Annotations = updated.Annotations + bundle.Labels = updated.Labels + return nil + }) + + return bundle, err +} + +// Calculates the bundle representation of the given HelmApp resource +func 
(r *HelmAppReconciler) calculateBundle(helmapp *fleet.HelmApp) *fleet.Bundle { + spec := helmapp.Spec.BundleSpec + + // set target names + for i, target := range spec.Targets { + if target.Name == "" { + spec.Targets[i].Name = fmt.Sprintf("target%03d", i) + } + } + + propagateHelmAppProperties(&spec) + + bundle := &fleet.Bundle{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: helmapp.Namespace, + Name: helmapp.Name, + }, + Spec: spec, + } + if len(bundle.Spec.Targets) == 0 { + bundle.Spec.Targets = []fleet.BundleTarget{ + { + Name: "default", + ClusterGroup: "default", + }, + } + } + + // apply additional labels from spec + for k, v := range helmapp.Spec.Labels { + if bundle.Labels == nil { + bundle.Labels = make(map[string]string) + } + bundle.Labels[k] = v + } + bundle.Labels = labels.Merge(bundle.Labels, map[string]string{ + fleet.HelmAppLabel: helmapp.Name, + }) + + // Setting the Resources to nil, the agent will download the helm chart + bundle.Spec.Resources = nil + // store the helm options (this will also enable the helm chart deployment in the bundle) + bundle.Spec.HelmAppOptions = &fleet.BundleHelmOptions{ + SecretName: helmapp.Spec.HelmSecretName, + InsecureSkipTLSverify: helmapp.Spec.InsecureSkipTLSverify, + } + + return bundle +} + +// propagateHelmAppProperties propagates root Helm chart properties to the child targets. +// This is necessary, so we can download the correct chart version for each target. 
+func propagateHelmAppProperties(spec *fleet.BundleSpec) { + // Check if there is anything to propagate + if spec.Helm == nil { + return + } + for _, target := range spec.Targets { + if target.Helm == nil { + // This target has nothing to propagate to + continue + } + if target.Helm.Repo == "" { + target.Helm.Repo = spec.Helm.Repo + } + if target.Helm.Chart == "" { + target.Helm.Chart = spec.Helm.Chart + } + if target.Helm.Version == "" { + target.Helm.Version = spec.Helm.Version + } + } +} + +func addFinalizer[T client.Object](ctx context.Context, c client.Client, obj T, finalizer string) error { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + nsName := types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()} + if err := c.Get(ctx, nsName, obj); err != nil { + return err + } + + controllerutil.AddFinalizer(obj, finalizer) + + return c.Update(ctx, obj) + }) + + if err != nil { + return client.IgnoreNotFound(err) + } + + return nil +} + +func deleteFinalizer[T client.Object](ctx context.Context, c client.Client, obj T, finalizer string) error { + err := retry.RetryOnConflict(retry.DefaultRetry, func() error { + nsName := types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()} + if err := c.Get(ctx, nsName, obj); err != nil { + return err + } + + controllerutil.RemoveFinalizer(obj, finalizer) + + return c.Update(ctx, obj) + }) + if client.IgnoreNotFound(err) != nil { + return err + } + return nil +} + +// handleVersion handles empty or * versions, downloading the current version from the registry. +// This is calculated in the upstream cluster so all downstream bundle deployments have the same +// version. 
(Potentially we could be gathering the version at very moment it is being updated, for example) +func (r *HelmAppReconciler) handleVersion(ctx context.Context, oldBundle *fleet.Bundle, bundle *fleet.Bundle, helmapp *fleet.HelmApp) error { + if helmapp.Spec.Helm.Version != "" && helmapp.Spec.Helm.Version != "*" { + bundle.Spec.Helm.Version = helmapp.Spec.Helm.Version + return nil + } + if helmChartSpecChanged(oldBundle.Spec.Helm, bundle.Spec.Helm, helmapp.Status.Version) { + auth := bundlereader.Auth{} + if helmapp.Spec.HelmSecretName != "" { + req := types.NamespacedName{Namespace: helmapp.Namespace, Name: helmapp.Spec.HelmSecretName} + var err error + auth, err = bundlereader.ReadHelmAuthFromSecret(ctx, r.Client, req) + if err != nil { + return err + } + } + auth.InsecureSkipVerify = helmapp.Spec.InsecureSkipTLSverify + + version, err := bundlereader.ChartVersion(*bundle.Spec.Helm, auth) + if err != nil { + return err + } + bundle.Spec.Helm.Version = version + } else { + bundle.Spec.Helm.Version = helmapp.Status.Version + } + + return nil +} + +// updateStatus updates the status for the HelmApp resource. It retries on +// conflict. If the status was updated successfully, it also collects (as in +// updates) metrics for the HelmApp resource. 
+func updateStatus(ctx context.Context, c client.Client, req types.NamespacedName, status fleet.HelmAppStatus) error { + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + t := &fleet.HelmApp{} + err := c.Get(ctx, req, t) + if err != nil { + return err + } + + // selectively update the status fields this reconciler is responsible for + t.Status.Version = status.Version + + // only keep the Ready condition from live status, it's calculated by the status reconciler + conds := []genericcondition.GenericCondition{} + for _, c := range t.Status.Conditions { + if c.Type == "Ready" { + conds = append(conds, c) + break + } + } + for _, c := range status.Conditions { + if c.Type == "Ready" { + continue + } + conds = append(conds, c) + } + t.Status.Conditions = conds + + err = c.Status().Update(ctx, t) + if err != nil { + return err + } + + metrics.HelmCollector.Collect(ctx, t) + + return nil + }) +} + +// updateErrorStatusHelm sets the condition in the status and tries to update the resource +func updateErrorStatusHelm(ctx context.Context, c client.Client, req types.NamespacedName, status fleet.HelmAppStatus, orgErr error) error { + setAcceptedConditionHelm(&status, orgErr) + if statusErr := updateStatus(ctx, c, req, status); statusErr != nil { + merr := []error{orgErr, fmt.Errorf("failed to update the status: %w", statusErr)} + return errutil.NewAggregate(merr) + } + return orgErr +} + +// setAcceptedCondition sets the condition and updates the timestamp, if the condition changed +func setAcceptedConditionHelm(status *fleet.HelmAppStatus, err error) { + cond := condition.Cond(fleet.HelmAppAcceptedCondition) + origStatus := status.DeepCopy() + cond.SetError(status, "", fleetutil.IgnoreConflict(err)) + if !equality.Semantic.DeepEqual(origStatus, status) { + cond.LastUpdated(status, time.Now().UTC().Format(time.RFC3339)) + } +} + +func helmChartSpecChanged(o *fleet.HelmOptions, n *fleet.HelmOptions, statusVersion string) bool { + if o == nil { + // still not 
set + return true + } + if o.Repo != n.Repo { + return true + } + if o.Chart != n.Chart { + return true + } + // check also against statusVersion in case that Reconcile is called + // before the status subresource has been fully updated in the cluster (and the cache) + if o.Version != n.Version && statusVersion != o.Version { + return true + } + return false +} + +// experimentalHelmOpsEnabled returns true if the EXPERIMENTAL_HELM_OPS env variable is set to true +// returns false otherwise +func experimentalHelmOpsEnabled() bool { + value, err := strconv.ParseBool(os.Getenv("EXPERIMENTAL_HELM_OPS")) + return err == nil && value +} diff --git a/internal/cmd/controller/helmops/reconciler/helmapp_controller_test.go b/internal/cmd/controller/helmops/reconciler/helmapp_controller_test.go new file mode 100644 index 0000000000..680bcaf77d --- /dev/null +++ b/internal/cmd/controller/helmops/reconciler/helmapp_controller_test.go @@ -0,0 +1,224 @@ +//go:generate mockgen --build_flags=--mod=mod -destination=../../../../mocks/poller_mock.go -package=mocks github.com/rancher/fleet/internal/cmd/controller/gitops/reconciler GitPoller +//go:generate mockgen --build_flags=--mod=mod -destination=../../../../mocks/client_mock.go -package=mocks sigs.k8s.io/controller-runtime/pkg/client Client,SubResourceWriter + +package reconciler + +import ( + "context" + "fmt" + "os" + "testing" + + "go.uber.org/mock/gomock" + + "github.com/rancher/fleet/internal/cmd/controller/finalize" + "github.com/rancher/fleet/internal/mocks" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/wrangler/v3/pkg/genericcondition" + + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + ctrl "sigs.k8s.io/controller-runtime" + 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func getCondition(helmapp *fleet.HelmApp, condType string) (genericcondition.GenericCondition, bool) { + for _, cond := range helmapp.Status.Conditions { + if cond.Type == condType { + return cond, true + } + } + return genericcondition.GenericCondition{}, false +} + +func TestReconcile_ReturnsAndRequeuesAfterAddingFinalizer(t *testing.T) { + os.Setenv("EXPERIMENTAL_HELM_OPS", "true") + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + scheme := runtime.NewScheme() + utilruntime.Must(batchv1.AddToScheme(scheme)) + helmapp := fleet.HelmApp{ + ObjectMeta: metav1.ObjectMeta{ + Name: "helmapp", + Namespace: "default", + }, + } + namespacedName := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + client := mocks.NewMockClient(mockCtrl) + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, fh *fleet.HelmApp, opts ...interface{}) error { + fh.Name = helmapp.Name + fh.Namespace = helmapp.Namespace + fh.Spec.Helm = &fleet.HelmOptions{ + Chart: "chart", + } + return nil + }, + ) + // expected from addFinalizer + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + client.EXPECT().Update(gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + + r := HelmAppReconciler{ + Client: client, + Scheme: scheme, + } + + ctx := context.TODO() + + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}) + if err != nil { + t.Errorf("unexpected error %v", err) + } + if !res.Requeue { + t.Errorf("expecting Requeue set to true, it was false") + } +} + +func TestReconcile_ErrorCreatingBundleIsShownInStatus(t *testing.T) { + os.Setenv("EXPERIMENTAL_HELM_OPS", "true") + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + scheme := runtime.NewScheme() + utilruntime.Must(batchv1.AddToScheme(scheme)) + helmapp := fleet.HelmApp{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "helmapp", + Namespace: "default", + }, + } + namespacedName := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + client := mocks.NewMockClient(mockCtrl) + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, fh *fleet.HelmApp, opts ...interface{}) error { + fh.Name = helmapp.Name + fh.Namespace = helmapp.Namespace + fh.Spec.Helm = &fleet.HelmOptions{ + Chart: "chart", + } + controllerutil.AddFinalizer(fh, finalize.HelmAppFinalizer) + return nil + }, + ) + + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, bundle *fleet.Bundle, opts ...interface{}) error { + return fmt.Errorf("this is a test error") + }, + ) + + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, bundle *fleet.HelmApp, opts ...interface{}) error { + return nil + }, + ) + + statusClient := mocks.NewMockSubResourceWriter(mockCtrl) + client.EXPECT().Status().Return(statusClient).Times(1) + statusClient.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any()).Do( + func(ctx context.Context, helmapp *fleet.HelmApp, opts ...interface{}) { + c, found := getCondition(helmapp, fleet.HelmAppAcceptedCondition) + if !found { + t.Errorf("expecting to find the %s condition and could not find it.", fleet.HelmAppAcceptedCondition) + } + if c.Message != "this is a test error" { + t.Errorf("expecting message [this is a test error] in condition, got [%s]", c.Message) + } + }, + ).Times(1) + + r := HelmAppReconciler{ + Client: client, + Scheme: scheme, + } + + ctx := context.TODO() + + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}) + if err == nil { + t.Errorf("expecting error, got nil") + } + if err.Error() != "this is a test 
error" { + t.Errorf("expecting error: [this is a test error], got %v", err.Error()) + } + if res.Requeue { + t.Errorf("expecting Requeue set to false, it was true") + } +} + +func TestReconcile_CreatesBundleAndUpdatesStatus(t *testing.T) { + os.Setenv("EXPERIMENTAL_HELM_OPS", "true") + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + scheme := runtime.NewScheme() + utilruntime.Must(batchv1.AddToScheme(scheme)) + helmapp := fleet.HelmApp{ + ObjectMeta: metav1.ObjectMeta{ + Name: "helmapp", + Namespace: "default", + }, + } + namespacedName := types.NamespacedName{Name: helmapp.Name, Namespace: helmapp.Namespace} + client := mocks.NewMockClient(mockCtrl) + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, fh *fleet.HelmApp, opts ...interface{}) error { + fh.Name = helmapp.Name + fh.Namespace = helmapp.Namespace + fh.Spec.Helm = &fleet.HelmOptions{ + Chart: "chart", + Version: "1.1.2", + } + controllerutil.AddFinalizer(fh, finalize.HelmAppFinalizer) + return nil + }, + ) + + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(2).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, bundle *fleet.Bundle, opts ...interface{}) error { + return errors.NewNotFound(schema.GroupResource{}, "Not found") + }, + ) + + client.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, bundle *fleet.Bundle, opts ...interface{}) error { + return nil + }, + ) + + client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, req types.NamespacedName, bundle *fleet.HelmApp, opts ...interface{}) error { + return nil + }, + ) + + statusClient := mocks.NewMockSubResourceWriter(mockCtrl) + client.EXPECT().Status().Return(statusClient).Times(1) + statusClient.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Any()).Do( + 
func(ctx context.Context, helmapp *fleet.HelmApp, opts ...interface{}) { + // version in status should be the one in the spec + if helmapp.Status.Version != "1.1.2" { + t.Errorf("expecting Status.Version == 1.1.2, got %s", helmapp.Status.Version) + } + }, + ).Times(1) + + r := HelmAppReconciler{ + Client: client, + Scheme: scheme, + } + + ctx := context.TODO() + + res, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: namespacedName}) + if err != nil { + t.Errorf("found unexpected error %v", err) + } + if res.Requeue { + t.Errorf("expecting Requeue set to false, it was true") + } +} diff --git a/internal/cmd/controller/helmops/reconciler/helmapp_status.go b/internal/cmd/controller/helmops/reconciler/helmapp_status.go new file mode 100644 index 0000000000..1b7f470bed --- /dev/null +++ b/internal/cmd/controller/helmops/reconciler/helmapp_status.go @@ -0,0 +1,133 @@ +package reconciler + +import ( + "context" + "fmt" + "sort" + + "github.com/rancher/fleet/internal/cmd/controller/status" + "github.com/rancher/fleet/internal/cmd/controller/summary" + "github.com/rancher/fleet/internal/resourcestatus" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/fleet/pkg/durations" + "github.com/rancher/fleet/pkg/sharding" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type HelmAppStatusReconciler struct { + client.Client + Scheme *runtime.Scheme + Workers int + ShardID string +} + +func (r *HelmAppStatusReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&fleet.HelmApp{}). 
+ Watches( + // Fan out from bundle to HelmApp + &fleet.Bundle{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []ctrl.Request { + app := a.GetLabels()[fleet.HelmAppLabel] + if app != "" { + return []ctrl.Request{{ + NamespacedName: types.NamespacedName{ + Namespace: a.GetNamespace(), + Name: app, + }, + }} + } + + return []ctrl.Request{} + }), + builder.WithPredicates(status.BundleStatusChangedPredicate()), + ). + WithEventFilter(sharding.FilterByShardID(r.ShardID)). + WithOptions(controller.Options{MaxConcurrentReconciles: r.Workers}). + Named("HelmAppStatus"). + Complete(r) +} + +// Reconcile reads the stat of the HelmApp and BundleDeployments and +// computes status fields for the HelmApp. This status is used to +// display information to the user. +func (r *HelmAppStatusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + if !experimentalHelmOpsEnabled() { + return ctrl.Result{}, nil + } + logger := log.FromContext(ctx).WithName("helmapp-status") + helmapp := &fleet.HelmApp{} + + if err := r.Get(ctx, req.NamespacedName, helmapp); err != nil && !errors.IsNotFound(err) { + return ctrl.Result{}, err + } else if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + + if !helmapp.DeletionTimestamp.IsZero() { + // the HelmApp controller will handle deletion + return ctrl.Result{}, nil + } + + if helmapp.Spec.Helm.Chart == "" { + return ctrl.Result{}, nil + } + + logger = logger.WithValues("generation", helmapp.Generation, "chart", helmapp.Spec.Helm.Chart).WithValues("conditions", helmapp.Status.Conditions) + ctx = log.IntoContext(ctx, logger) + + logger.V(1).Info("Reconciling HelmApp status") + + bdList := &fleet.BundleDeploymentList{} + err := r.List(ctx, bdList, client.MatchingLabels{ + fleet.HelmAppLabel: helmapp.Name, + fleet.BundleNamespaceLabel: helmapp.Namespace, + }) + if err != nil { + return ctrl.Result{}, err + } + + err = setStatusHelm(bdList, helmapp) + if err != nil { + return 
ctrl.Result{}, err + } + + err = r.Client.Status().Update(ctx, helmapp) + if err != nil { + logger.Error(err, "Reconcile failed update to helm app status", "status", helmapp.Status) + return ctrl.Result{RequeueAfter: durations.HelmAppStatusDelay}, nil + } + + return ctrl.Result{}, nil +} + +func setStatusHelm(list *fleet.BundleDeploymentList, helmapp *fleet.HelmApp) error { + // sort for resourceKey? + sort.Slice(list.Items, func(i, j int) bool { + return list.Items[i].UID < list.Items[j].UID + }) + + err := status.SetFields(list, &helmapp.Status.StatusBase) + if err != nil { + return err + } + + resourcestatus.SetResources(list, &helmapp.Status.StatusBase) + + summary.SetReadyConditions(&helmapp.Status, "Bundle", helmapp.Status.Summary) + + helmapp.Status.Display.ReadyBundleDeployments = fmt.Sprintf("%d/%d", + helmapp.Status.Summary.Ready, + helmapp.Status.Summary.DesiredReady) + + return nil +} diff --git a/internal/cmd/controller/reconciler/bundle_controller.go b/internal/cmd/controller/reconciler/bundle_controller.go index 3d5dfe6577..3d0b6dad94 100644 --- a/internal/cmd/controller/reconciler/bundle_controller.go +++ b/internal/cmd/controller/reconciler/bundle_controller.go @@ -5,6 +5,8 @@ package reconciler import ( "context" "fmt" + "os" + "strconv" "github.com/go-logr/logr" "github.com/rancher/fleet/internal/cmd/controller/finalize" @@ -150,10 +152,18 @@ func (r *BundleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr bundle.Status.ObservedGeneration, ) + // if the bundle has the helmops options set but the experimental flag is not + // set we don't deploy the bundle. + // This is to avoid intentional or accidental deployment of bundles with no + // resources or not well defined. 
+ if bundle.Spec.HelmAppOptions != nil && !experimentalHelmOpsEnabled() { + return ctrl.Result{}, fmt.Errorf("bundle contains data used by helm ops but env variable EXPERIMENTAL_HELM_OPS is not set to true") + } contentsInOCI := bundle.Spec.ContentsID != "" && ociwrapper.ExperimentalOCIIsEnabled() + contentsInHelmChart := bundle.Spec.HelmAppOptions != nil manifestID := bundle.Spec.ContentsID var resourcesManifest *manifest.Manifest - if !contentsInOCI { + if !contentsInOCI && !contentsInHelmChart { resourcesManifest = manifest.FromBundle(bundle) if bundle.Generation != bundle.Status.ObservedGeneration { resourcesManifest.ResetSHASum() @@ -177,8 +187,8 @@ func (r *BundleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr return ctrl.Result{}, err } - if !contentsInOCI && len(matchedTargets) > 0 { - // when not using the OCI registry we need to create a contents resource + if (!contentsInOCI && !contentsInHelmChart) && len(matchedTargets) > 0 { + // when not using the OCI registry or helm chart we need to create a contents resource // so the BundleDeployments are able to access the contents to be deployed. // Otherwise, do not create a content resource if there are no targets. // `fleet apply` puts all resources into `bundle.Spec.Resources`. 
@@ -220,16 +230,19 @@ func (r *BundleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // DependsOn with the bundle's DependsOn (pure function) and replacing // the labels with the bundle's labels for _, target := range matchedTargets { - bd, err := r.createBundleDeployment(ctx, logger, target, contentsInOCI, manifestID) + bd, err := r.createBundleDeployment( + ctx, + logger, + target, + contentsInOCI, + bundle.Spec.HelmAppOptions, + manifestID) if err != nil { return ctrl.Result{}, err } - if bd != nil && contentsInOCI { - // we need to create the OCI registry credentials secret in the BundleDeployment's namespace - if err := r.createDeploymentOCISecret(ctx, bundle, bd); err != nil { - return ctrl.Result{}, err - } + if err := r.handleDeploymentSecret(ctx, bundle, bd); err != nil { + return ctrl.Result{}, err } } @@ -326,6 +339,7 @@ func (r *BundleReconciler) createBundleDeployment( logger logr.Logger, target *target.Target, contentsInOCI bool, + helmAppOptions *fleet.BundleHelmOptions, manifestID string, ) (*fleet.BundleDeployment, error) { if target.Deployment == nil { @@ -350,6 +364,7 @@ func (r *BundleReconciler) createBundleDeployment( controllerutil.AddFinalizer(bd, bundleDeploymentFinalizer) bd.Spec.OCIContents = contentsInOCI + bd.Spec.HelmChartOptions = helmAppOptions err := retry.RetryOnConflict(retry.DefaultRetry, func() error { if contentsInOCI { @@ -371,6 +386,7 @@ func (r *BundleReconciler) createBundleDeployment( logger.Error(err, "Reconcile failed to add content finalizer", "content ID", manifestID) } + contentsInHelmChart := helmAppOptions != nil updated := bd.DeepCopy() op, err := controllerutil.CreateOrUpdate(ctx, r.Client, bd, func() error { // When this mutation function is called by CreateOrUpdate, bd contains the @@ -379,7 +395,7 @@ func (r *BundleReconciler) createBundleDeployment( // latest version of the bundle points to a different deployment ID. 
// An empty value for bd.Spec.DeploymentID means that we are deploying the first version of this // bundle, hence there are no Contents left over to purge. - if !bd.Spec.OCIContents && + if (!bd.Spec.OCIContents || !contentsInHelmChart) && bd.Spec.DeploymentID != "" && bd.Spec.DeploymentID != updated.Spec.DeploymentID { if err := finalize.PurgeContent(ctx, r.Client, bd.Name, bd.Spec.DeploymentID); err != nil { @@ -396,6 +412,7 @@ func (r *BundleReconciler) createBundleDeployment( bd.Spec = updated.Spec bd.Labels = updated.GetLabels() + return nil }) if err != nil && !apierrors.IsAlreadyExists(err) { @@ -414,28 +431,28 @@ func (r *BundleReconciler) createBundleDeployment( return bd, nil } -func (r *BundleReconciler) createDeploymentOCISecret(ctx context.Context, bundle *fleet.Bundle, bd *fleet.BundleDeployment) error { +func (r *BundleReconciler) createDeploymentSecret(ctx context.Context, secretName string, bundle *fleet.Bundle, bd *fleet.BundleDeployment) error { namespacedName := types.NamespacedName{ Namespace: bundle.Namespace, - Name: bundle.Spec.ContentsID, + Name: secretName, } - var ociSecret corev1.Secret - if err := r.Get(ctx, namespacedName, &ociSecret); err != nil { + var secret corev1.Secret + if err := r.Get(ctx, namespacedName, &secret); err != nil { return err } // clone the secret, and just change the namespace so it's in the target's namespace - targetOCISecret := &corev1.Secret{ + targetSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: ociSecret.Name, + Name: secret.Name, Namespace: bd.Namespace, }, - Data: ociSecret.Data, + Data: secret.Data, } - if err := controllerutil.SetControllerReference(bd, targetOCISecret, r.Scheme); err != nil { + if err := controllerutil.SetControllerReference(bd, targetSecret, r.Scheme); err != nil { return err } - if err := r.Create(ctx, targetOCISecret); err != nil { + if err := r.Create(ctx, targetSecret); err != nil { if !apierrors.IsAlreadyExists(err) { return err } @@ -463,3 +480,26 @@ func (r 
*BundleReconciler) getOCIReference(ctx context.Context, bundle *fleet.Bu // this is not a valid reference, it is only for display return fmt.Sprintf("oci://%s/%s:latest", string(ref), bundle.Spec.ContentsID), nil } + +func (r *BundleReconciler) handleDeploymentSecret(ctx context.Context, bundle *fleet.Bundle, bd *fleet.BundleDeployment) error { + if bd == nil { + return nil + } + contentsInOCI := bundle.Spec.ContentsID != "" && ociwrapper.ExperimentalOCIIsEnabled() + contentsInHelmChart := bundle.Spec.HelmAppOptions != nil && experimentalHelmOpsEnabled() + + if contentsInOCI { + return r.createDeploymentSecret(ctx, bundle.Spec.ContentsID, bundle, bd) + } + if contentsInHelmChart && bundle.Spec.HelmAppOptions.SecretName != "" { + return r.createDeploymentSecret(ctx, bundle.Spec.HelmAppOptions.SecretName, bundle, bd) + } + return nil +} + +// experimentalHelmOpsEnabled returns true if the EXPERIMENTAL_HELM_OPS env variable is set to true +// returns false otherwise +func experimentalHelmOpsEnabled() bool { + value, err := strconv.ParseBool(os.Getenv("EXPERIMENTAL_HELM_OPS")) + return err == nil && value +} diff --git a/internal/cmd/controller/reconciler/cluster_controller.go b/internal/cmd/controller/reconciler/cluster_controller.go index e62715180e..b5e15161e3 100644 --- a/internal/cmd/controller/reconciler/cluster_controller.go +++ b/internal/cmd/controller/reconciler/cluster_controller.go @@ -171,7 +171,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct // Count the number of gitrepo, bundledeployemt and deployed resources for this cluster cluster.Status.DesiredReadyGitRepos = 0 cluster.Status.ReadyGitRepos = 0 - cluster.Status.ResourceCounts = fleet.GitRepoResourceCounts{} + cluster.Status.ResourceCounts = fleet.ResourceCounts{} cluster.Status.Summary = fleet.BundleSummary{} sort.Slice(bundleDeployments.Items, func(i, j int) bool { diff --git a/internal/cmd/controller/reconciler/clustergroup_controller.go 
b/internal/cmd/controller/reconciler/clustergroup_controller.go index 4f1cb0bb13..35d35d8f17 100644 --- a/internal/cmd/controller/reconciler/clustergroup_controller.go +++ b/internal/cmd/controller/reconciler/clustergroup_controller.go @@ -107,7 +107,7 @@ func (r *ClusterGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request // update summary group.Status.Summary = fleet.BundleSummary{} - group.Status.ResourceCounts = fleet.GitRepoResourceCounts{} + group.Status.ResourceCounts = fleet.ResourceCounts{} group.Status.ClusterCount = 0 group.Status.NonReadyClusterCount = 0 group.Status.NonReadyClusters = nil diff --git a/internal/cmd/controller/root.go b/internal/cmd/controller/root.go index d6dd2c1270..831821306d 100644 --- a/internal/cmd/controller/root.go +++ b/internal/cmd/controller/root.go @@ -11,6 +11,7 @@ import ( "github.com/rancher/fleet/internal/cmd/controller/agentmanagement" "github.com/rancher/fleet/internal/cmd/controller/gitops" + "github.com/rancher/fleet/internal/cmd/controller/helmops" "github.com/spf13/cobra" @@ -156,6 +157,7 @@ func App() *cobra.Command { cleanup.App(), agentmanagement.App(), gitops.App(zopts), + helmops.App(zopts), ) return root } diff --git a/internal/cmd/controller/status/status.go b/internal/cmd/controller/status/status.go new file mode 100644 index 0000000000..f7a3cf06b3 --- /dev/null +++ b/internal/cmd/controller/status/status.go @@ -0,0 +1,102 @@ +package status + +import ( + "reflect" + + "github.com/rancher/fleet/internal/cmd/controller/summary" + "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// BundleStatusChangedPredicate returns true if the bundle +// status has changed, or the bundle was created +func BundleStatusChangedPredicate() predicate.Funcs { + return predicate.Funcs{ + 
CreateFunc: func(e event.CreateEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + n, isBundle := e.ObjectNew.(*v1alpha1.Bundle) + if !isBundle { + return false + } + o := e.ObjectOld.(*v1alpha1.Bundle) + if n == nil || o == nil { + return false + } + return !reflect.DeepEqual(n.Status, o.Status) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return true + }, + } +} + +// setFields sets bundledeployment related status fields: +// Summary, ReadyClusters, DesiredReadyClusters, Display.State, Display.Message, Display.Error +func SetFields(list *fleet.BundleDeploymentList, status *fleet.StatusBase) error { + var ( + maxState fleet.BundleState + message string + count = map[client.ObjectKey]int{} + readyCount = map[client.ObjectKey]int{} + ) + + status.Summary = fleet.BundleSummary{} + + for _, bd := range list.Items { + state := summary.GetDeploymentState(&bd) + summary.IncrementState(&status.Summary, bd.Name, state, summary.MessageFromDeployment(&bd), bd.Status.ModifiedStatus, bd.Status.NonReadyStatus) + status.Summary.DesiredReady++ + if fleet.StateRank[state] > fleet.StateRank[maxState] { + maxState = state + message = summary.MessageFromDeployment(&bd) + } + + // gather status per cluster + // try to avoid old bundle deployments, which might be missing the labels + if bd.Labels == nil { + // this should not happen + continue + } + + name := bd.Labels[fleet.ClusterLabel] + namespace := bd.Labels[fleet.ClusterNamespaceLabel] + if name == "" || namespace == "" { + // this should not happen + continue + } + + key := client.ObjectKey{Name: name, Namespace: namespace} + count[key]++ + if state == fleet.Ready { + readyCount[key]++ + } + } + + // unique number of clusters from bundledeployments + status.DesiredReadyClusters = len(count) + + // number of clusters where all deployments are ready + readyClusters := 0 + for key, n := range readyCount { + if count[key] == n { + readyClusters++ + } + } + status.ReadyClusters = readyClusters + 
+ if maxState == fleet.Ready { + maxState = "" + message = "" + } + + status.Display.State = string(maxState) + status.Display.Message = message + status.Display.Error = len(message) > 0 + + return nil +} diff --git a/internal/cmd/controller/summary/summary.go b/internal/cmd/controller/summary/summary.go index 6ba29c2b38..c2dd8753d8 100644 --- a/internal/cmd/controller/summary/summary.go +++ b/internal/cmd/controller/summary/summary.go @@ -62,7 +62,7 @@ func Increment(left *fleet.BundleSummary, right fleet.BundleSummary) { } } -func IncrementResourceCounts(left *fleet.GitRepoResourceCounts, right fleet.GitRepoResourceCounts) { +func IncrementResourceCounts(left *fleet.ResourceCounts, right fleet.ResourceCounts) { left.Ready += right.Ready left.DesiredReady += right.DesiredReady left.WaitApplied += right.WaitApplied diff --git a/internal/cmd/controller/target/builder.go b/internal/cmd/controller/target/builder.go index 5d6a41a5c1..27f529809d 100644 --- a/internal/cmd/controller/target/builder.go +++ b/internal/cmd/controller/target/builder.go @@ -50,7 +50,6 @@ func (m *Manager) Targets(ctx context.Context, bundle *fleet.Bundle, manifestID if err != nil { return nil, err } - var targets []*Target for _, namespace := range namespaces { clusters := &fleet.ClusterList{} @@ -58,7 +57,6 @@ func (m *Manager) Targets(ctx context.Context, bundle *fleet.Bundle, manifestID if err != nil { return nil, err } - for _, cluster := range clusters.Items { cluster := cluster logger.V(4).Info("Cluster has namespace?", "cluster", cluster.Name, "namespace", cluster.Status.Namespace) diff --git a/internal/manifest/store_test.go b/internal/manifest/store_test.go index 9409720669..a86eddf90e 100644 --- a/internal/manifest/store_test.go +++ b/internal/manifest/store_test.go @@ -73,7 +73,7 @@ func Test_contentStore_Store(t *testing.T) { client.EXPECT().Get(ctx, nsn, gomock.Any()).Return(nil) client.EXPECT().Create(ctx, gomock.Any()).Times(0) } else { - client.EXPECT().Get(ctx, nsn, 
gomock.Any()).Return(apierrors.NewNotFound(fleet.Resource("Content"), tt.want)) + client.EXPECT().Get(ctx, nsn, gomock.Any()).Return(apierrors.NewNotFound(fleet.GroupResource("Content"), tt.want)) client.EXPECT().Create(ctx, &contentMatcher{ name: tt.want, sha256sum: checksum, diff --git a/internal/metrics/gitrepo_metrics.go b/internal/metrics/gitrepo_metrics.go index 7900e0769e..60d20db31a 100644 --- a/internal/metrics/gitrepo_metrics.go +++ b/internal/metrics/gitrepo_metrics.go @@ -4,7 +4,6 @@ import ( "strings" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" ) @@ -16,98 +15,7 @@ var ( gitRepoMetrics, collectGitRepoMetrics, } - gitRepoMetrics = map[string]prometheus.Collector{ - "resources_desired_ready": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_desired_ready", - Help: "The count of resources that are desired to be in a Ready state.", - }, - gitRepoLabels, - ), - "resources_missing": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_missing", - Help: "The count of resources that are in a Missing state.", - }, - gitRepoLabels, - ), - "resources_modified": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_modified", - Help: "The count of resources that are in a Modified state.", - }, - gitRepoLabels, - ), - "resources_not_ready": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_not_ready", - Help: "The count of resources that are in a NotReady state.", - }, - gitRepoLabels, - ), - "resources_orphaned": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_orphaned", - Help: 
"The count of resources that are in an Orphaned state.", - }, - gitRepoLabels, - ), - "resources_ready": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_ready", - Help: "The count of resources that are in a Ready state.", - }, - gitRepoLabels, - ), - "resources_unknown": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_unknown", - Help: "The count of resources that are in an Unknown state.", - }, - gitRepoLabels, - ), - "resources_wait_applied": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "resources_wait_applied", - Help: "The count of resources that are in a WaitApplied state.", - }, - gitRepoLabels, - ), - "desired_ready_clusters": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "desired_ready_clusters", - Help: "The amount of clusters desired to be in a ready state.", - }, - gitRepoLabels, - ), - "ready_clusters": promauto.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: metricPrefix, - Subsystem: gitRepoSubsystem, - Name: "ready_clusters", - Help: "The count of clusters in a Ready state.", - }, - gitRepoLabels, - ), - } + gitRepoMetrics = getStatusMetrics(gitRepoSubsystem, gitRepoLabels) collectGitRepoMetrics = func( obj any, metrics map[string]prometheus.Collector, diff --git a/internal/metrics/helm_metrics.go b/internal/metrics/helm_metrics.go new file mode 100644 index 0000000000..39f375a49f --- /dev/null +++ b/internal/metrics/helm_metrics.go @@ -0,0 +1,55 @@ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" +) + +var ( + helmSubsystem = "helmapp" + helmLabels = []string{"name", "namespace", "repo", "chart", "version"} + HelmCollector = CollectorCollection{ + helmSubsystem, + 
helmMetrics, + collectHelmMetrics, + } + helmMetrics = getStatusMetrics(helmSubsystem, helmLabels) + collectHelmMetrics = func( + obj any, + metrics map[string]prometheus.Collector, + ) { + helm, ok := obj.(*fleet.HelmApp) + if !ok { + panic("unexpected object type") + } + + labels := prometheus.Labels{ + "name": helm.Name, + "namespace": helm.Namespace, + "repo": helm.Spec.Helm.Repo, + "chart": helm.Spec.Helm.Chart, + "version": helm.Status.Version, + } + + metrics["desired_ready_clusters"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.DesiredReadyClusters)) + metrics["ready_clusters"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ReadyClusters)) + metrics["resources_missing"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.Missing)) + metrics["resources_modified"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.Modified)) + metrics["resources_not_ready"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.NotReady)) + metrics["resources_orphaned"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.Orphaned)) + metrics["resources_desired_ready"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.DesiredReady)) + metrics["resources_ready"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.Ready)) + metrics["resources_unknown"].(*prometheus.GaugeVec). + With(labels).Set(float64(helm.Status.ResourceCounts.Unknown)) + metrics["resources_wait_applied"].(*prometheus.GaugeVec). 
+ With(labels).Set(float64(helm.Status.ResourceCounts.WaitApplied)) + } +) diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go index da6c4ca373..a8d4f5295f 100644 --- a/internal/metrics/metrics.go +++ b/internal/metrics/metrics.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/log" @@ -44,6 +45,12 @@ func RegisterGitOptsMetrics() { GitRepoCollector.Register() } +func RegisterHelmOpsMetrics() { + enabled = true + + HelmCollector.Register() +} + // CollectorCollection implements the generic methods `Delete` and `Register` // for a collection of Prometheus collectors. It is used to manage the lifecycle // of a collection of Prometheus collectors. @@ -107,3 +114,98 @@ func (c *CollectorCollection) Register() { metrics.Registry.MustRegister(metric) } } + +func getStatusMetrics(subsystem string, labels []string) map[string]prometheus.Collector { + return map[string]prometheus.Collector{ + "resources_desired_ready": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_desired_ready", + Help: "The count of resources that are desired to be in a Ready state.", + }, + labels, + ), + "resources_missing": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_missing", + Help: "The count of resources that are in a Missing state.", + }, + labels, + ), + "resources_modified": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_modified", + Help: "The count of resources that are in a Modified state.", + }, + labels, + ), + "resources_not_ready": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: 
subsystem, + Name: "resources_not_ready", + Help: "The count of resources that are in a NotReady state.", + }, + labels, + ), + "resources_orphaned": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_orphaned", + Help: "The count of resources that are in an Orphaned state.", + }, + labels, + ), + "resources_ready": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_ready", + Help: "The count of resources that are in a Ready state.", + }, + labels, + ), + "resources_unknown": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_unknown", + Help: "The count of resources that are in an Unknown state.", + }, + labels, + ), + "resources_wait_applied": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "resources_wait_applied", + Help: "The count of resources that are in a WaitApplied state.", + }, + labels, + ), + "desired_ready_clusters": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "desired_ready_clusters", + Help: "The amount of clusters desired to be in a ready state.", + }, + labels, + ), + "ready_clusters": promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricPrefix, + Subsystem: subsystem, + Name: "ready_clusters", + Help: "The count of clusters in a Ready state.", + }, + labels, + ), + } +} diff --git a/internal/resourcestatus/resourcekey.go b/internal/resourcestatus/resourcekey.go index cefae3a48d..4bbbe634a6 100644 --- a/internal/resourcestatus/resourcekey.go +++ b/internal/resourcestatus/resourcekey.go @@ -8,12 +8,12 @@ import ( fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" ) -func SetGitRepoResources(list *fleet.BundleDeploymentList, gitrepo *fleet.GitRepo) { - s := summaryState(gitrepo.Status.Summary) +func 
SetResources(list *fleet.BundleDeploymentList, status *fleet.StatusBase) { + s := summaryState(status.Summary) r, errors := fromResources(list, s) - gitrepo.Status.ResourceErrors = errors - gitrepo.Status.ResourceCounts = countResources(r) - gitrepo.Status.Resources = merge(r) + status.ResourceErrors = errors + status.ResourceCounts = countResources(r) + status.Resources = merge(r) } func SetClusterResources(list *fleet.BundleDeploymentList, cluster *fleet.Cluster) { @@ -25,8 +25,8 @@ func SetClusterResources(list *fleet.BundleDeploymentList, cluster *fleet.Cluste // merge takes a list of GitRepo resources and deduplicates resources deployed to multiple clusters, // ensuring that for such resources, the output contains a single resource entry with a field summarizing // its status on each cluster. -func merge(resources []fleet.GitRepoResource) []fleet.GitRepoResource { - merged := map[string]fleet.GitRepoResource{} +func merge(resources []fleet.Resource) []fleet.Resource { + merged := map[string]fleet.Resource{} for _, resource := range resources { key := key(resource) if existing, ok := merged[key]; ok { @@ -37,7 +37,7 @@ func merge(resources []fleet.GitRepoResource) []fleet.GitRepoResource { } } - var result []fleet.GitRepoResource + var result []fleet.Resource for _, resource := range merged { result = append(result, resource) } @@ -48,7 +48,7 @@ func merge(resources []fleet.GitRepoResource) []fleet.GitRepoResource { return result } -func key(resource fleet.GitRepoResource) string { +func key(resource fleet.Resource) string { return resource.Type + "/" + resource.ID } @@ -63,12 +63,12 @@ func summaryState(summary fleet.BundleSummary) string { } // fromResources inspects all bundledeployments for this GitRepo and returns a list of -// GitRepoResources and error messages. +// Resources and error messages. // // It populates gitrepo status resources from bundleDeployments. BundleDeployment.Status.Resources is the list of deployed resources. 
-func fromResources(list *fleet.BundleDeploymentList, summaryState string) ([]fleet.GitRepoResource, []string) { +func fromResources(list *fleet.BundleDeploymentList, summaryState string) ([]fleet.Resource, []string) { var ( - resources []fleet.GitRepoResource + resources []fleet.Resource errors []string ) @@ -94,8 +94,8 @@ func fromResources(list *fleet.BundleDeploymentList, summaryState string) ([]fle return resources, errors } -func toResourceState(k fleet.ResourceKey, perCluster []fleet.ResourcePerClusterState, incomplete bool, summaryState string) fleet.GitRepoResource { - resource := fleet.GitRepoResource{ +func toResourceState(k fleet.ResourceKey, perCluster []fleet.ResourcePerClusterState, incomplete bool, summaryState string) fleet.Resource { + resource := fleet.Resource{ APIVersion: k.APIVersion, Kind: k.Kind, Namespace: k.Namespace, @@ -134,7 +134,7 @@ func toResourceState(k fleet.ResourceKey, perCluster []fleet.ResourcePerClusterS return resource } -func toType(resource fleet.GitRepoResource) (string, string) { +func toType(resource fleet.Resource) (string, string) { group := strings.Split(resource.APIVersion, "/")[0] if group == "v1" { group = "" @@ -237,8 +237,8 @@ func bundleDeploymentResources(bd fleet.BundleDeployment) map[fleet.ResourceKey] return bdResources } -func countResources(resources []fleet.GitRepoResource) fleet.GitRepoResourceCounts { - counts := fleet.GitRepoResourceCounts{} +func countResources(resources []fleet.Resource) fleet.ResourceCounts { + counts := fleet.ResourceCounts{} for _, resource := range resources { counts.DesiredReady++ diff --git a/internal/resourcestatus/resourcekey_test.go b/internal/resourcestatus/resourcekey_test.go index 939b82b6a4..891e9003d7 100644 --- a/internal/resourcestatus/resourcekey_test.go +++ b/internal/resourcestatus/resourcekey_test.go @@ -19,9 +19,11 @@ var _ = Describe("Resourcekey", func() { BeforeEach(func() { gitrepo = &fleet.GitRepo{ Status: fleet.GitRepoStatus{ - Summary: fleet.BundleSummary{ 
- Ready: 2, - WaitApplied: 1, + StatusBase: fleet.StatusBase{ + Summary: fleet.BundleSummary{ + Ready: 2, + WaitApplied: 1, + }, }, }, } @@ -114,10 +116,10 @@ var _ = Describe("Resourcekey", func() { }) It("returns a list", func() { - SetGitRepoResources(list, gitrepo) + SetResources(list, &gitrepo.Status.StatusBase) Expect(gitrepo.Status.Resources).To(HaveLen(2)) - Expect(gitrepo.Status.Resources).To(ContainElement(fleet.GitRepoResource{ + Expect(gitrepo.Status.Resources).To(ContainElement(fleet.Resource{ APIVersion: "v1", Kind: "Deployment", Type: "deployment", @@ -142,7 +144,7 @@ var _ = Describe("Resourcekey", func() { }, }, })) - Expect(gitrepo.Status.Resources).To(ContainElement(fleet.GitRepoResource{ + Expect(gitrepo.Status.Resources).To(ContainElement(fleet.Resource{ APIVersion: "v1", Kind: "Service", Type: "service", diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go index e010f29373..1693ad54c0 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/bundle_types.go @@ -115,6 +115,12 @@ type BundleSpec struct { // ContentsID stores the contents id when deploying contents using an OCI registry. // +nullable ContentsID string `json:"contentsId,omitempty"` + + // HelmAppOptions stores the options relative to HelmApp resources + // Non-nil HelmAppOptions indicate that the source of resources is a Helm chart, + // not a git repository. + // +nullable + HelmAppOptions *BundleHelmOptions `json:"helmAppOptions,omitempty"` } type BundleRef struct { @@ -408,3 +414,12 @@ type PartitionStatus struct { // Summary is a summary state for the partition, calculated over its non-ready resources. 
Summary BundleSummary `json:"summary,omitempty"` } + +type BundleHelmOptions struct { + // SecretName stores the secret name for storing credentials when accessing + // a remote helm repository defined in a HelmApp resource + SecretName string `json:"helmAppSecretName,omitempty"` + + // InsecureSkipTLSverify will use insecure HTTPS to clone the helm app resource. + InsecureSkipTLSverify bool `json:"helmAppInsecureSkipTLSVerify,omitempty"` +} diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/bundledeployment_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/bundledeployment_types.go index 749c7d36a4..98c1835c89 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/bundledeployment_types.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/bundledeployment_types.go @@ -318,6 +318,9 @@ type BundleDeploymentSpec struct { CorrectDrift *CorrectDrift `json:"correctDrift,omitempty"` // OCIContents is true when this deployment's contents is stored in an oci registry OCIContents bool `json:"ociContents,omitempty"` + // HelmChartOptions is not nil and has the helm chart config details when contents + // should be downloaded from a helm chart + HelmChartOptions *BundleHelmOptions `json:"helmChartOptions,omitempty"` } // BundleDeploymentResource contains the metadata of a deployed resource. diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/cluster_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/cluster_types.go index 7057b4973b..c8047df156 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/cluster_types.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/cluster_types.go @@ -143,8 +143,8 @@ type ClusterStatus struct { // Summary is a summary of the bundledeployments. Summary BundleSummary `json:"summary,omitempty"` - // ResourceCounts is an aggregate over the GitRepoResourceCounts. - ResourceCounts GitRepoResourceCounts `json:"resourceCounts,omitempty"` + // ResourceCounts is an aggregate over the ResourceCounts. 
+ ResourceCounts ResourceCounts `json:"resourceCounts,omitempty"` // ReadyGitRepos is the number of gitrepos for this cluster that are ready. // +optional ReadyGitRepos int `json:"readyGitRepos"` diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/clustergroup_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/clustergroup_types.go index a963d7cbdb..7eac5e4bf4 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/clustergroup_types.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/clustergroup_types.go @@ -67,7 +67,7 @@ type ClusterGroupStatus struct { Display ClusterGroupDisplay `json:"display,omitempty"` // ResourceCounts contains the number of resources in each state over // all bundles in the cluster group. - ResourceCounts GitRepoResourceCounts `json:"resourceCounts,omitempty"` + ResourceCounts ResourceCounts `json:"resourceCounts,omitempty"` } type ClusterGroupDisplay struct { diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/gitrepo_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/gitrepo_types.go index cad2723b5a..76d00319c2 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/gitrepo_types.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/gitrepo_types.go @@ -1,7 +1,6 @@ package v1alpha1 import ( - "github.com/rancher/wrangler/v3/pkg/genericcondition" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -160,6 +159,7 @@ type GitTarget struct { } type GitRepoStatus struct { + StatusBase `json:",inline"` // ObservedGeneration is the current generation of the resource in the cluster. It is copied from k8s // metadata.Generation. The value is incremented for all changes, except for changes to .metadata or .status. // +optional @@ -172,63 +172,14 @@ type GitRepoStatus struct { // WebhookCommit is the latest Git commit hash received from a webhook // +optional WebhookCommit string `json:"webhookCommit,omitempty"` - // ReadyClusters is the lowest number of clusters that are ready over - // all the bundles of this GitRepo. 
- // +optional - ReadyClusters int `json:"readyClusters"` - // DesiredReadyClusters is the number of clusters that should be ready for bundles of this GitRepo. - // +optional - DesiredReadyClusters int `json:"desiredReadyClusters"` // GitJobStatus is the status of the last Git job run, e.g. "Current" if there was no error. GitJobStatus string `json:"gitJobStatus,omitempty"` - // Summary contains the number of bundle deployments in each state and a list of non-ready resources. - Summary BundleSummary `json:"summary,omitempty"` - // Display contains a human readable summary of the status. - Display GitRepoDisplay `json:"display,omitempty"` - // Conditions is a list of Wrangler conditions that describe the state - // of the GitRepo. - Conditions []genericcondition.GenericCondition `json:"conditions,omitempty"` - // Resources contains metadata about the resources of each bundle. - Resources []GitRepoResource `json:"resources,omitempty"` - // ResourceCounts contains the number of resources in each state over all bundles. - ResourceCounts GitRepoResourceCounts `json:"resourceCounts,omitempty"` - // ResourceErrors is a sorted list of errors from the resources. - ResourceErrors []string `json:"resourceErrors,omitempty"` // LastSyncedImageScanTime is the time of the last image scan. LastSyncedImageScanTime metav1.Time `json:"lastSyncedImageScanTime,omitempty"` // LastPollingTime is the last time the polling check was triggered LastPollingTime metav1.Time `json:"lastPollingTriggered,omitempty"` } -// GitRepoResourceCounts contains the number of resources in each state. -type GitRepoResourceCounts struct { - // Ready is the number of ready resources. - // +optional - Ready int `json:"ready"` - // DesiredReady is the number of resources that should be ready. - // +optional - DesiredReady int `json:"desiredReady"` - // WaitApplied is the number of resources that are waiting to be applied. 
- // +optional - WaitApplied int `json:"waitApplied"` - // Modified is the number of resources that have been modified. - // +optional - Modified int `json:"modified"` - // Orphaned is the number of orphaned resources. - // +optional - Orphaned int `json:"orphaned"` - // Missing is the number of missing resources. - // +optional - Missing int `json:"missing"` - // Unknown is the number of resources in an unknown state. - // +optional - Unknown int `json:"unknown"` - // NotReady is the number of not ready resources. Resources are not - // ready if they do not match any other state. - // +optional - NotReady int `json:"notReady"` -} - type GitRepoDisplay struct { // ReadyBundleDeployments is a string in the form "%d/%d", that describes the // number of ready bundledeployments over the total number of bundledeployments. @@ -242,43 +193,6 @@ type GitRepoDisplay struct { Error bool `json:"error,omitempty"` } -// GitRepoResource contains metadata about the resources of a bundle. -type GitRepoResource struct { - // APIVersion is the API version of the resource. - // +nullable - APIVersion string `json:"apiVersion,omitempty"` - // Kind is the k8s kind of the resource. - // +nullable - Kind string `json:"kind,omitempty"` - // Type is the type of the resource, e.g. "apiextensions.k8s.io.customresourcedefinition" or "configmap". - Type string `json:"type,omitempty"` - // ID is the name of the resource, e.g. "namespace1/my-config" or "backingimagemanagers.storage.io". - // +nullable - ID string `json:"id,omitempty"` - // Namespace of the resource. - // +nullable - Namespace string `json:"namespace,omitempty"` - // Name of the resource. - // +nullable - Name string `json:"name,omitempty"` - // IncompleteState is true if a bundle summary has 10 or more non-ready - // resources or a non-ready resource has more 10 or more non-ready or - // modified states. - IncompleteState bool `json:"incompleteState,omitempty"` - // State is the state of the resource, e.g. 
"Unknown", "WaitApplied", "ErrApplied" or "Ready". - State string `json:"state,omitempty"` - // Error is true if any Error in the PerClusterState is true. - Error bool `json:"error,omitempty"` - // Transitioning is true if any Transitioning in the PerClusterState is true. - Transitioning bool `json:"transitioning,omitempty"` - // Message is the first message from the PerClusterStates. - // +nullable - Message string `json:"message,omitempty"` - // PerClusterState is a list of states for each cluster. Derived from the summaries non-ready resources. - // +nullable - PerClusterState []ResourcePerClusterState `json:"perClusterState,omitempty"` -} - // ResourcePerClusterState is generated for each non-ready resource of the bundles. type ResourcePerClusterState struct { // State is the state of the resource. diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/groupversion_info.go b/pkg/apis/fleet.cattle.io/v1alpha1/groupversion_info.go index 8019a98009..00e2dfd687 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/groupversion_info.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/groupversion_info.go @@ -25,7 +25,7 @@ var ( AddToScheme = InternalSchemeBuilder.AddToScheme ) -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { +// GroupResource takes an unqualified resource and returns a Group qualified GroupResource +func GroupResource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/helmapp_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/helmapp_types.go new file mode 100644 index 0000000000..76306bc6a7 --- /dev/null +++ b/pkg/apis/fleet.cattle.io/v1alpha1/helmapp_types.go @@ -0,0 +1,68 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + InternalSchemeBuilder.Register(&HelmApp{}, &HelmAppList{}) +} + +var ( + HelmAppLabel = 
"fleet.cattle.io/fleet-helm-name" +) + +const ( + HelmAppAcceptedCondition = "Accepted" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:categories=fleet,path=helmapps +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Repo",type=string,JSONPath=`.spec.helm.repo` +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.helm.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.status.version` +// +kubebuilder:printcolumn:name="BundleDeployments-Ready",type=string,JSONPath=`.status.display.readyBundleDeployments` +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].message` + +// HelmApp describes a helm chart information. +// The resource contains the necessary information to deploy the chart to target clusters. +type HelmApp struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmAppSpec `json:"spec,omitempty"` + Status HelmAppStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HelmAppList contains a list of HelmApp +type HelmAppList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmApp `json:"items"` +} + +type HelmAppSpec struct { + BundleSpec `json:",inline"` + // Labels are copied to the bundle and can be used in a + // dependsOn.selector. + Labels map[string]string `json:"labels,omitempty"` + // HelmSecretName contains the auth secret with the credentials to access + // a private Helm repository. + // +nullable + HelmSecretName string `json:"helmSecretName,omitempty"` + // InsecureSkipTLSverify will use insecure HTTPS to clone the helm app resource. + InsecureSkipTLSverify bool `json:"insecureSkipTLSVerify,omitempty"` +} + +type HelmAppStatus struct { + StatusBase `json:",inline"` + // Version installed for the helm chart. 
+ // When using * or empty version in the spec we get the latest version from + // the helm repository when possible + Version string `json:"version,omitempty"` +} diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/resource_types.go b/pkg/apis/fleet.cattle.io/v1alpha1/resource_types.go new file mode 100644 index 0000000000..07af039d80 --- /dev/null +++ b/pkg/apis/fleet.cattle.io/v1alpha1/resource_types.go @@ -0,0 +1,67 @@ +package v1alpha1 + +// Resource contains metadata about the resources of a bundle. +type Resource struct { + // APIVersion is the API version of the resource. + // +nullable + APIVersion string `json:"apiVersion,omitempty"` + // Kind is the k8s kind of the resource. + // +nullable + Kind string `json:"kind,omitempty"` + // Type is the type of the resource, e.g. "apiextensions.k8s.io.customresourcedefinition" or "configmap". + Type string `json:"type,omitempty"` + // ID is the name of the resource, e.g. "namespace1/my-config" or "backingimagemanagers.storage.io". + // +nullable + ID string `json:"id,omitempty"` + // Namespace of the resource. + // +nullable + Namespace string `json:"namespace,omitempty"` + // Name of the resource. + // +nullable + Name string `json:"name,omitempty"` + // IncompleteState is true if a bundle summary has 10 or more non-ready + // resources or a non-ready resource has more 10 or more non-ready or + // modified states. + IncompleteState bool `json:"incompleteState,omitempty"` + // State is the state of the resource, e.g. "Unknown", "WaitApplied", "ErrApplied" or "Ready". + State string `json:"state,omitempty"` + // Error is true if any Error in the PerClusterState is true. + Error bool `json:"error,omitempty"` + // Transitioning is true if any Transitioning in the PerClusterState is true. + Transitioning bool `json:"transitioning,omitempty"` + // Message is the first message from the PerClusterStates. + // +nullable + Message string `json:"message,omitempty"` + // PerClusterState is a list of states for each cluster. 
Derived from the summaries non-ready resources. + // +nullable + PerClusterState []ResourcePerClusterState `json:"perClusterState,omitempty"` +} + +// ResourceCounts contains the number of resources in each state. +type ResourceCounts struct { + // Ready is the number of ready resources. + // +optional + Ready int `json:"ready"` + // DesiredReady is the number of resources that should be ready. + // +optional + DesiredReady int `json:"desiredReady"` + // WaitApplied is the number of resources that are waiting to be applied. + // +optional + WaitApplied int `json:"waitApplied"` + // Modified is the number of resources that have been modified. + // +optional + Modified int `json:"modified"` + // Orphaned is the number of orphaned resources. + // +optional + Orphaned int `json:"orphaned"` + // Missing is the number of missing resources. + // +optional + Missing int `json:"missing"` + // Unknown is the number of resources in an unknown state. + // +optional + Unknown int `json:"unknown"` + // NotReady is the number of not ready resources. Resources are not + // ready if they do not match any other state. + // +optional + NotReady int `json:"notReady"` +} diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/status.go b/pkg/apis/fleet.cattle.io/v1alpha1/status.go new file mode 100644 index 0000000000..0f03dd9dcb --- /dev/null +++ b/pkg/apis/fleet.cattle.io/v1alpha1/status.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import "github.com/rancher/wrangler/v3/pkg/genericcondition" + +type StatusBase struct { + // ReadyClusters is the lowest number of clusters that are ready over + // all the bundles of this resource. + // +optional + ReadyClusters int `json:"readyClusters"` + // DesiredReadyClusters is the number of clusters that should be ready for bundles of this resource. + // +optional + DesiredReadyClusters int `json:"desiredReadyClusters"` + // Summary contains the number of bundle deployments in each state and a list of non-ready resources. 
+ Summary BundleSummary `json:"summary,omitempty"` + // Display contains a human readable summary of the status. + Display StatusDisplay `json:"display,omitempty"` + // Conditions is a list of Wrangler conditions that describe the state + // of the resource. + Conditions []genericcondition.GenericCondition `json:"conditions,omitempty"` + // Resources contains metadata about the resources of each bundle. + Resources []Resource `json:"resources,omitempty"` + // ResourceCounts contains the number of resources in each state over all bundles. + ResourceCounts ResourceCounts `json:"resourceCounts,omitempty"` + // ResourceErrors is a sorted list of errors from the resources. + ResourceErrors []string `json:"resourceErrors,omitempty"` +} + +type StatusDisplay struct { + // ReadyBundleDeployments is a string in the form "%d/%d", that describes the + // number of ready bundledeployments over the total number of bundledeployments. + ReadyBundleDeployments string `json:"readyBundleDeployments,omitempty"` + // State is the state of the resource, e.g. "GitUpdating" or the maximal + // BundleState according to StateRank. + State string `json:"state,omitempty"` + // Message contains the relevant message from the deployment conditions. + Message string `json:"message,omitempty"` + // Error is true if a message is present. 
+ Error bool `json:"error,omitempty"` +} diff --git a/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated.deepcopy.go index d2a814dcdf..3cb0447134 100644 --- a/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/fleet.cattle.io/v1alpha1/zz_generated.deepcopy.go @@ -248,6 +248,11 @@ func (in *BundleDeploymentSpec) DeepCopyInto(out *BundleDeploymentSpec) { *out = new(CorrectDrift) **out = **in } + if in.HelmChartOptions != nil { + in, out := &in.HelmChartOptions, &out.HelmChartOptions + *out = new(BundleHelmOptions) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleDeploymentSpec. @@ -320,6 +325,21 @@ func (in *BundleDisplay) DeepCopy() *BundleDisplay { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BundleHelmOptions) DeepCopyInto(out *BundleHelmOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleHelmOptions. +func (in *BundleHelmOptions) DeepCopy() *BundleHelmOptions { + if in == nil { + return nil + } + out := new(BundleHelmOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BundleList) DeepCopyInto(out *BundleList) { *out = *in @@ -489,6 +509,11 @@ func (in *BundleSpec) DeepCopyInto(out *BundleSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.HelmAppOptions != nil { + in, out := &in.HelmAppOptions, &out.HelmAppOptions + *out = new(BundleHelmOptions) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleSpec. 
@@ -1377,43 +1402,6 @@ func (in *GitRepoList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitRepoResource) DeepCopyInto(out *GitRepoResource) { - *out = *in - if in.PerClusterState != nil { - in, out := &in.PerClusterState, &out.PerClusterState - *out = make([]ResourcePerClusterState, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoResource. -func (in *GitRepoResource) DeepCopy() *GitRepoResource { - if in == nil { - return nil - } - out := new(GitRepoResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitRepoResourceCounts) DeepCopyInto(out *GitRepoResourceCounts) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoResourceCounts. -func (in *GitRepoResourceCounts) DeepCopy() *GitRepoResourceCounts { - if in == nil { - return nil - } - out := new(GitRepoResourceCounts) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GitRepoRestriction) DeepCopyInto(out *GitRepoRestriction) { *out = *in @@ -1547,26 +1535,7 @@ func (in *GitRepoSpec) DeepCopy() *GitRepoSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GitRepoStatus) DeepCopyInto(out *GitRepoStatus) { *out = *in - in.Summary.DeepCopyInto(&out.Summary) - out.Display = in.Display - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]genericcondition.GenericCondition, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]GitRepoResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.ResourceCounts = in.ResourceCounts - if in.ResourceErrors != nil { - in, out := &in.ResourceErrors, &out.ResourceErrors - *out = make([]string, len(*in)) - copy(*out, *in) - } + in.StatusBase.DeepCopyInto(&out.StatusBase) in.LastSyncedImageScanTime.DeepCopyInto(&out.LastSyncedImageScanTime) in.LastPollingTime.DeepCopyInto(&out.LastPollingTime) } @@ -1606,6 +1575,104 @@ func (in *GitTarget) DeepCopy() *GitTarget { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmApp) DeepCopyInto(out *HelmApp) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmApp. +func (in *HelmApp) DeepCopy() *HelmApp { + if in == nil { + return nil + } + out := new(HelmApp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmApp) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmAppList) DeepCopyInto(out *HelmAppList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmApp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmAppList. +func (in *HelmAppList) DeepCopy() *HelmAppList { + if in == nil { + return nil + } + out := new(HelmAppList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmAppList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmAppSpec) DeepCopyInto(out *HelmAppSpec) { + *out = *in + in.BundleSpec.DeepCopyInto(&out.BundleSpec) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmAppSpec. +func (in *HelmAppSpec) DeepCopy() *HelmAppSpec { + if in == nil { + return nil + } + out := new(HelmAppSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmAppStatus) DeepCopyInto(out *HelmAppStatus) { + *out = *in + in.StatusBase.DeepCopyInto(&out.StatusBase) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmAppStatus. 
+func (in *HelmAppStatus) DeepCopy() *HelmAppStatus { + if in == nil { + return nil + } + out := new(HelmAppStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HelmOptions) DeepCopyInto(out *HelmOptions) { *out = *in @@ -1972,6 +2039,43 @@ func (in *PartitionStatus) DeepCopy() *PartitionStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Resource) DeepCopyInto(out *Resource) { + *out = *in + if in.PerClusterState != nil { + in, out := &in.PerClusterState, &out.PerClusterState + *out = make([]ResourcePerClusterState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resource. +func (in *Resource) DeepCopy() *Resource { + if in == nil { + return nil + } + out := new(Resource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceCounts) DeepCopyInto(out *ResourceCounts) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceCounts. +func (in *ResourceCounts) DeepCopy() *ResourceCounts { + if in == nil { + return nil + } + out := new(ResourceCounts) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceKey) DeepCopyInto(out *ResourceKey) { *out = *in @@ -2074,6 +2178,56 @@ func (in *SemVerPolicy) DeepCopy() *SemVerPolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatusBase) DeepCopyInto(out *StatusBase) { + *out = *in + in.Summary.DeepCopyInto(&out.Summary) + out.Display = in.Display + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]genericcondition.GenericCondition, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]Resource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.ResourceCounts = in.ResourceCounts + if in.ResourceErrors != nil { + in, out := &in.ResourceErrors, &out.ResourceErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusBase. +func (in *StatusBase) DeepCopy() *StatusBase { + if in == nil { + return nil + } + out := new(StatusBase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusDisplay) DeepCopyInto(out *StatusDisplay) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDisplay. +func (in *StatusDisplay) DeepCopy() *StatusDisplay { + if in == nil { + return nil + } + out := new(StatusDisplay) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValuesFrom) DeepCopyInto(out *ValuesFrom) { *out = *in diff --git a/pkg/durations/durations.go b/pkg/durations/durations.go index a46bcd9783..328efb5b98 100644 --- a/pkg/durations/durations.go +++ b/pkg/durations/durations.go @@ -33,6 +33,10 @@ const ( // the gitrepo status first, before the status controller looks at // bundledeployments. 
GitRepoStatusDelay = time.Second * 5 + // HelmAppStatusDelay gives the helmapp controller some time to update + // the helmapp status first, before the status controller looks at + // bundledeployments. + HelmAppStatusDelay = time.Second * 5 ) // Equal reports whether the duration t is equal to u. diff --git a/pkg/generated/controllers/fleet.cattle.io/v1alpha1/helmapp.go b/pkg/generated/controllers/fleet.cattle.io/v1alpha1/helmapp.go new file mode 100644 index 0000000000..fac8d60a82 --- /dev/null +++ b/pkg/generated/controllers/fleet.cattle.io/v1alpha1/helmapp.go @@ -0,0 +1,208 @@ +/* +Copyright (c) 2020 - 2024 SUSE LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "sync" + "time" + + v1alpha1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// HelmAppController interface for managing HelmApp resources. +type HelmAppController interface { + generic.ControllerInterface[*v1alpha1.HelmApp, *v1alpha1.HelmAppList] +} + +// HelmAppClient interface for managing HelmApp resources in Kubernetes. 
+type HelmAppClient interface { + generic.ClientInterface[*v1alpha1.HelmApp, *v1alpha1.HelmAppList] +} + +// HelmAppCache interface for retrieving HelmApp resources in memory. +type HelmAppCache interface { + generic.CacheInterface[*v1alpha1.HelmApp] +} + +// HelmAppStatusHandler is executed for every added or modified HelmApp. Should return the new status to be updated +type HelmAppStatusHandler func(obj *v1alpha1.HelmApp, status v1alpha1.HelmAppStatus) (v1alpha1.HelmAppStatus, error) + +// HelmAppGeneratingHandler is the top-level handler that is executed for every HelmApp event. It extends HelmAppStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type HelmAppGeneratingHandler func(obj *v1alpha1.HelmApp, status v1alpha1.HelmAppStatus) ([]runtime.Object, v1alpha1.HelmAppStatus, error) + +// RegisterHelmAppStatusHandler configures a HelmAppController to execute a HelmAppStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterHelmAppStatusHandler(ctx context.Context, controller HelmAppController, condition condition.Cond, name string, handler HelmAppStatusHandler) { + statusHandler := &helmAppStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterHelmAppGeneratingHandler configures a HelmAppController to execute a HelmAppGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterHelmAppGeneratingHandler(ctx context.Context, controller HelmAppController, apply apply.Apply, + condition condition.Cond, name string, handler HelmAppGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &helmAppGeneratingHandler{ + HelmAppGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterHelmAppStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type helmAppStatusHandler struct { + client HelmAppClient + condition condition.Cond + handler HelmAppStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *helmAppStatusHandler) sync(key string, obj *v1alpha1.HelmApp) (*v1alpha1.HelmApp, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type helmAppGeneratingHandler struct { + HelmAppGeneratingHandler + apply apply.Apply + opts 
generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *helmAppGeneratingHandler) Remove(key string, obj *v1alpha1.HelmApp) (*v1alpha1.HelmApp, error) { + if obj != nil { + return obj, nil + } + + obj = &v1alpha1.HelmApp{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured HelmAppGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *helmAppGeneratingHandler) Handle(obj *v1alpha1.HelmApp, status v1alpha1.HelmAppStatus) (v1alpha1.HelmAppStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.HelmAppGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *helmAppGeneratingHandler) isNewResourceVersion(obj *v1alpha1.HelmApp) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *helmAppGeneratingHandler) storeResourceVersion(obj *v1alpha1.HelmApp) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/pkg/generated/controllers/fleet.cattle.io/v1alpha1/interface.go b/pkg/generated/controllers/fleet.cattle.io/v1alpha1/interface.go index 90449b2857..2acb5e6e22 100644 --- a/pkg/generated/controllers/fleet.cattle.io/v1alpha1/interface.go +++ b/pkg/generated/controllers/fleet.cattle.io/v1alpha1/interface.go @@ -41,6 +41,7 @@ type Interface interface { Content() ContentController GitRepo() GitRepoController GitRepoRestriction() GitRepoRestrictionController + HelmApp() HelmAppController ImageScan() ImageScanController } @@ -94,6 +95,10 @@ func (v *version) GitRepoRestriction() GitRepoRestrictionController { return generic.NewController[*v1alpha1.GitRepoRestriction, *v1alpha1.GitRepoRestrictionList](schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "GitRepoRestriction"}, "gitreporestrictions", true, v.controllerFactory) } +func (v *version) HelmApp() HelmAppController { + return generic.NewController[*v1alpha1.HelmApp, *v1alpha1.HelmAppList](schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "HelmApp"}, "helmapps", true, v.controllerFactory) +} + func (v *version) ImageScan() 
ImageScanController { return generic.NewController[*v1alpha1.ImageScan, *v1alpha1.ImageScanList](schema.GroupVersionKind{Group: "fleet.cattle.io", Version: "v1alpha1", Kind: "ImageScan"}, "imagescans", true, v.controllerFactory) }